-rw-r--r--Documentation/bpf/btf.rst870
-rw-r--r--Documentation/bpf/index.rst7
-rw-r--r--Documentation/devicetree/bindings/net/dsa/ksz.txt102
-rw-r--r--Documentation/devicetree/bindings/net/qcom,ethqos.txt64
-rw-r--r--Documentation/devicetree/bindings/ptp/ptp-qoriq.txt2
-rw-r--r--Documentation/networking/devlink-params-mlxsw.txt2
-rw-r--r--Documentation/networking/dsa/dsa.txt13
-rw-r--r--Documentation/networking/filter.txt15
-rw-r--r--Documentation/networking/index.rst1
-rw-r--r--Documentation/networking/phy.rst447
-rw-r--r--Documentation/networking/phy.txt427
-rw-r--r--Documentation/networking/snmp_counter.rst111
-rw-r--r--Documentation/networking/switchdev.txt2
-rw-r--r--Documentation/sysctl/net.txt14
-rw-r--r--MAINTAINERS20
-rw-r--r--arch/alpha/include/uapi/asm/socket.h2
-rw-r--r--arch/arm/boot/dts/ls1021a.dtsi1
-rw-r--r--arch/arm/mach-orion5x/common.c2
-rw-r--r--arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c2
-rw-r--r--arch/arm/mach-orion5x/rd88f5181l-ge-setup.c2
-rw-r--r--arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c2
-rw-r--r--arch/arm/mach-orion5x/wnr854t-setup.c2
-rw-r--r--arch/arm/mach-orion5x/wrt350n-v2-setup.c2
-rw-r--r--arch/arm/net/bpf_jit_32.c53
-rw-r--r--arch/arm/net/bpf_jit_32.h2
-rw-r--r--arch/arm/plat-orion/common.c2
-rw-r--r--arch/arm64/net/bpf_jit_comp.c37
-rw-r--r--arch/ia64/include/uapi/asm/socket.h2
-rw-r--r--arch/mips/include/uapi/asm/socket.h2
-rw-r--r--arch/parisc/include/uapi/asm/socket.h2
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h1
-rw-r--r--arch/powerpc/net/bpf_jit.h4
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c120
-rw-r--r--arch/s390/include/uapi/asm/socket.h2
-rw-r--r--arch/s390/net/bpf_jit_comp.c66
-rw-r--r--arch/sparc/include/uapi/asm/socket.h2
-rw-r--r--arch/x86/net/bpf_jit_comp.c46
-rw-r--r--arch/x86/net/bpf_jit_comp32.c121
-rw-r--r--arch/xtensa/include/uapi/asm/socket.h2
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_cm.c4
-rw-r--r--drivers/isdn/hisax/netjet.c6
-rw-r--r--drivers/isdn/hisax/q931.c2
-rw-r--r--drivers/isdn/hisax/st5481.h2
-rw-r--r--drivers/isdn/isdnloop/isdnloop.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c188
-rw-r--r--drivers/net/bonding/bond_netlink.c67
-rw-r--r--drivers/net/dsa/lantiq_gswip.c24
-rw-r--r--drivers/net/dsa/qca8k.c16
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c178
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c4
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c9
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c24
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c47
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c6
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c4
-rw-r--r--drivers/net/ethernet/freescale/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/Makefile3
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Makefile1
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c237
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h31
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c98
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h14
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig19
-rw-r--r--drivers/net/ethernet/freescale/enetc/Makefile15
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c1604
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h230
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_cbdr.c210
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c597
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h532
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_msg.c164
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c931
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.h49
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c255
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c288
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c85
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c270
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c135
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c44
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c13
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.c28
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.h14
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c11
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c244
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c40
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c110
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h77
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c160
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c680
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c266
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c135
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c81
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c156
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c71
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h32
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c24
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c3
-rw-r--r--drivers/net/ethernet/jme.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c67
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c154
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c157
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c255
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c47
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c217
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h17
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c3
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c139
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h51
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c74
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c109
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h89
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c27
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c612
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c158
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c126
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c30
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c115
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h42
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c9
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c292
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_rdma.c63
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c3
-rw-r--r--drivers/net/ethernet/realtek/atp.c27
-rw-r--r--drivers/net/ethernet/realtek/r8169.c647
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c4
-rw-r--r--drivers/net/ethernet/sfc/ef10.c14
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c56
-rw-r--r--drivers/net/ethernet/sfc/mtd.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c545
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c36
-rw-r--r--drivers/net/ethernet/sun/sungem.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c4
-rw-r--r--drivers/net/macvlan.c3
-rw-r--r--drivers/net/phy/amd.c7
-rw-r--r--drivers/net/phy/aquantia.c5
-rw-r--r--drivers/net/phy/at803x.c44
-rw-r--r--drivers/net/phy/bcm-cygnus.c10
-rw-r--r--drivers/net/phy/bcm-phy-lib.c10
-rw-r--r--drivers/net/phy/bcm-phy-lib.h10
-rw-r--r--drivers/net/phy/bcm63xx.c6
-rw-r--r--drivers/net/phy/bcm7xxx.c6
-rw-r--r--drivers/net/phy/bcm87xx.c7
-rw-r--r--drivers/net/phy/broadcom.c6
-rw-r--r--drivers/net/phy/cicada.c7
-rw-r--r--drivers/net/phy/cortina.c11
-rw-r--r--drivers/net/phy/davicom.c7
-rw-r--r--drivers/net/phy/dp83640.c15
-rw-r--r--drivers/net/phy/dp83822.c12
-rw-r--r--drivers/net/phy/dp83848.c12
-rw-r--r--drivers/net/phy/dp83867.c12
-rw-r--r--drivers/net/phy/et1011c.c7
-rw-r--r--drivers/net/phy/fixed_phy.c6
-rw-r--r--drivers/net/phy/icplus.c7
-rw-r--r--drivers/net/phy/intel-xway.c11
-rw-r--r--drivers/net/phy/lxt.c7
-rw-r--r--drivers/net/phy/marvell.c7
-rw-r--r--drivers/net/phy/marvell10g.c1
-rw-r--r--drivers/net/phy/mdio-bcm-iproc.c10
-rw-r--r--drivers/net/phy/mdio-bcm-unimac.c6
-rw-r--r--drivers/net/phy/mdio-bitbang.c7
-rw-r--r--drivers/net/phy/mdio-boardinfo.c6
-rw-r--r--drivers/net/phy/mdio-cavium.c7
-rw-r--r--drivers/net/phy/mdio-cavium.h5
-rw-r--r--drivers/net/phy/mdio-gpio.c7
-rw-r--r--drivers/net/phy/mdio-i2c.c5
-rw-r--r--drivers/net/phy/mdio-i2c.h5
-rw-r--r--drivers/net/phy/mdio-moxart.c7
-rw-r--r--drivers/net/phy/mdio-mux-bcm-iproc.c13
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c7
-rw-r--r--drivers/net/phy/mdio-mux-mmioreg.c5
-rw-r--r--drivers/net/phy/mdio-mux.c7
-rw-r--r--drivers/net/phy/mdio-octeon.c7
-rw-r--r--drivers/net/phy/mdio-sun4i.c7
-rw-r--r--drivers/net/phy/mdio-thunder.c7
-rw-r--r--drivers/net/phy/mdio-xgene.c14
-rw-r--r--drivers/net/phy/mdio-xgene.h14
-rw-r--r--drivers/net/phy/mdio_bus.c9
-rw-r--r--drivers/net/phy/mdio_device.c7
-rw-r--r--drivers/net/phy/meson-gxl.c12
-rw-r--r--drivers/net/phy/micrel.c49
-rw-r--r--drivers/net/phy/microchip.c14
-rw-r--r--drivers/net/phy/mscc.c1
-rw-r--r--drivers/net/phy/national.c7
-rw-r--r--drivers/net/phy/phy-core.c6
-rw-r--r--drivers/net/phy/phy.c96
-rw-r--r--drivers/net/phy/phy_device.c122
-rw-r--r--drivers/net/phy/phy_led_triggers.c14
-rw-r--r--drivers/net/phy/phylink.c12
-rw-r--r--drivers/net/phy/qsemi.c7
-rw-r--r--drivers/net/phy/realtek.c7
-rw-r--r--drivers/net/phy/sfp.c1
-rw-r--r--drivers/net/phy/smsc.c6
-rw-r--r--drivers/net/phy/spi_ks8995.c5
-rw-r--r--drivers/net/phy/ste10Xp.c7
-rw-r--r--drivers/net/phy/swphy.c6
-rw-r--r--drivers/net/phy/teranetics.c5
-rw-r--r--drivers/net/phy/uPD60620.c7
-rw-r--r--drivers/net/phy/vitesse.c9
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c11
-rw-r--r--drivers/net/sb1000.c11
-rw-r--r--drivers/net/usb/rtl8150.c2
-rw-r--r--drivers/net/virtio_net.c1
-rw-r--r--drivers/net/vxlan.c352
-rw-r--r--drivers/net/wan/dscc4.c50
-rw-r--r--drivers/net/wan/lmc/Makefile2
-rw-r--r--drivers/ptp/Kconfig2
-rw-r--r--drivers/ptp/Makefile4
-rw-r--r--drivers/ptp/ptp_qoriq.c129
-rw-r--r--drivers/ptp/ptp_qoriq_debugfs.c101
-rw-r--r--drivers/s390/net/qeth_core.h14
-rw-r--r--drivers/s390/net/qeth_core_main.c132
-rw-r--r--drivers/s390/net/qeth_core_mpc.h1
-rw-r--r--drivers/s390/net/qeth_l2_main.c244
-rw-r--r--drivers/s390/net/qeth_l3_main.c113
-rw-r--r--include/linux/bpf.h10
-rw-r--r--include/linux/bpf_verifier.h6
-rw-r--r--include/linux/filter.h21
-rw-r--r--include/linux/fsl/ptp_qoriq.h15
-rw-r--r--include/linux/igmp.h11
-rw-r--r--include/linux/in.h5
-rw-r--r--include/linux/ip.h5
-rw-r--r--include/linux/ipv6.h6
-rw-r--r--include/linux/netdevice.h7
-rw-r--r--include/linux/netfilter/nf_conntrack_proto_gre.h17
-rw-r--r--include/linux/netfilter_ipv4.h6
-rw-r--r--include/linux/netlink.h1
-rw-r--r--include/linux/phy.h12
-rw-r--r--include/linux/platform_data/b53.h2
-rw-r--r--include/linux/platform_data/dsa.h68
-rw-r--r--include/linux/platform_data/mv88e6xxx.h2
-rw-r--r--include/linux/qed/qed_if.h21
-rw-r--r--include/linux/qed/qede_rdma.h10
-rw-r--r--include/linux/skbuff.h5
-rw-r--r--include/net/addrconf.h27
-rw-r--r--include/net/bond_3ad.h17
-rw-r--r--include/net/dsa.h66
-rw-r--r--include/net/inet_connection_sock.h29
-rw-r--r--include/net/inet_frag.h16
-rw-r--r--include/net/ip_tunnels.h2
-rw-r--r--include/net/ip_vs.h3
-rw-r--r--include/net/ipv6_frag.h11
-rw-r--r--include/net/net_namespace.h4
-rw-r--r--include/net/netfilter/br_netfilter.h1
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h3
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netfilter/nf_conntrack_core.h5
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h122
-rw-r--r--include/net/netfilter/nf_nat.h4
-rw-r--r--include/net/netfilter/nf_tables.h26
-rw-r--r--include/net/netfilter/nf_tables_core.h16
-rw-r--r--include/net/netns/conntrack.h30
-rw-r--r--include/net/netns/xdp.h13
-rw-r--r--include/net/sch_generic.h3
-rw-r--r--include/net/switchdev.h6
-rw-r--r--include/net/tcp.h3
-rw-r--r--include/net/tls.h3
-rw-r--r--include/net/vxlan.h6
-rw-r--r--include/net/xdp_sock.h1
-rw-r--r--include/uapi/asm-generic/socket.h2
-rw-r--r--include/uapi/linux/bpf.h2
-rw-r--r--include/uapi/linux/icmpv6.h2
-rw-r--r--include/uapi/linux/if_bonding.h24
-rw-r--r--include/uapi/linux/if_link.h1
-rw-r--r--include/uapi/linux/igmp.h1
-rw-r--r--include/uapi/linux/in.h9
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h10
-rw-r--r--include/uapi/linux/pkt_cls.h13
-rw-r--r--include/uapi/linux/xdp_diag.h72
-rw-r--r--kernel/bpf/btf.c104
-rw-r--r--kernel/bpf/core.c273
-rw-r--r--kernel/bpf/disasm.c34
-rw-r--r--kernel/bpf/offload.c35
-rw-r--r--kernel/bpf/verifier.c624
-rw-r--r--net/batman-adv/multicast.c4
-rw-r--r--net/bpf/test_run.c82
-rw-r--r--net/bridge/br_fdb.c3
-rw-r--r--net/bridge/br_multicast.c183
-rw-r--r--net/bridge/br_netfilter_hooks.c5
-rw-r--r--net/bridge/br_private.h3
-rw-r--r--net/bridge/br_switchdev.c2
-rw-r--r--net/core/dst.c6
-rw-r--r--net/core/filter.c22
-rw-r--r--net/core/flow_dissector.c92
-rw-r--r--net/core/net_namespace.c38
-rw-r--r--net/core/rtnetlink.c116
-rw-r--r--net/core/skmsg.c25
-rw-r--r--net/core/sock.c47
-rw-r--r--net/core/sysctl_net_core.c18
-rw-r--r--net/dccp/input.c2
-rw-r--r--net/dccp/timer.c4
-rw-r--r--net/decnet/dn_fib.c2
-rw-r--r--net/dsa/dsa_priv.h3
-rw-r--r--net/dsa/master.c56
-rw-r--r--net/dsa/slave.c5
-rw-r--r--net/ipv4/devinet.c86
-rw-r--r--net/ipv4/igmp.c68
-rw-r--r--net/ipv4/inet_fragment.c293
-rw-r--r--net/ipv4/ip_fragment.c289
-rw-r--r--net/ipv4/ip_gre.c112
-rw-r--r--net/ipv4/ip_input.c2
-rw-r--r--net/ipv4/ip_tunnel.c62
-rw-r--r--net/ipv4/ipip.c2
-rw-r--r--net/ipv4/ipmr.c61
-rw-r--r--net/ipv4/netfilter.c18
-rw-r--r--net/ipv4/netfilter/nf_nat_l3proto_ipv4.c2
-rw-r--r--net/ipv4/route.c72
-rw-r--r--net/ipv4/tcp.c42
-rw-r--r--net/ipv4/tcp_bbr.c180
-rw-r--r--net/ipv4/tcp_input.c8
-rw-r--r--net/ipv4/tcp_ipv4.c2
-rw-r--r--net/ipv4/tcp_minisocks.c34
-rw-r--r--net/ipv4/tcp_output.c65
-rw-r--r--net/ipv4/tcp_timer.c87
-rw-r--r--net/ipv4/udp_tunnel.c15
-rw-r--r--net/ipv6/addrconf.c95
-rw-r--r--net/ipv6/addrlabel.c47
-rw-r--r--net/ipv6/ip6_gre.c4
-rw-r--r--net/ipv6/ip6_udp_tunnel.c15
-rw-r--r--net/ipv6/mcast.c2
-rw-r--r--net/ipv6/mcast_snoop.c83
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c260
-rw-r--r--net/ipv6/netfilter/nf_nat_l3proto_ipv6.c2
-rw-r--r--net/ipv6/reassembly.c233
-rw-r--r--net/ipv6/route.c70
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/mpls/af_mpls.c103
-rw-r--r--net/netfilter/Kconfig2
-rw-r--r--net/netfilter/Makefile3
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c49
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_ah_esp.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_tcp.c15
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_udp.c15
-rw-r--r--net/netfilter/nf_conntrack_core.c210
-rw-r--r--net/netfilter/nf_conntrack_expect.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c14
-rw-r--r--net/netfilter/nf_conntrack_pptp.c2
-rw-r--r--net/netfilter/nf_conntrack_proto.c514
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c134
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c85
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c196
-rw-r--r--net/netfilter/nf_conntrack_proto_icmp.c67
-rw-r--r--net/netfilter/nf_conntrack_proto_icmpv6.c69
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c128
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c160
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c80
-rw-r--r--net/netfilter/nf_conntrack_standalone.c427
-rw-r--r--net/netfilter/nf_flow_table_core.c2
-rw-r--r--net/netfilter/nf_nat_core.c15
-rw-r--r--net/netfilter/nf_tables_api.c137
-rw-r--r--net/netfilter/nf_tables_core.c25
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c19
-rw-r--r--net/netfilter/nft_bitwise.c5
-rw-r--r--net/netfilter/nft_byteorder.c6
-rw-r--r--net/netfilter/nft_cmp.c6
-rw-r--r--net/netfilter/nft_counter.c2
-rw-r--r--net/netfilter/nft_ct.c4
-rw-r--r--net/netfilter/nft_dynset.c5
-rw-r--r--net/netfilter/nft_hash.c121
-rw-r--r--net/netfilter/nft_immediate.c6
-rw-r--r--net/netfilter/nft_meta.c12
-rw-r--r--net/netfilter/nft_objref.c5
-rw-r--r--net/netfilter/nft_payload.c6
-rw-r--r--net/netfilter/nft_quota.c2
-rw-r--r--net/netfilter/nft_range.c5
-rw-r--r--net/netfilter/nft_rt.c6
-rw-r--r--net/netfilter/utils.c19
-rw-r--r--net/netfilter/xt_CT.c2
-rw-r--r--net/netfilter/xt_physdev.c9
-rw-r--r--net/netlink/af_netlink.c8
-rw-r--r--net/openvswitch/conntrack.c2
-rw-r--r--net/openvswitch/meter.c3
-rw-r--r--net/sched/cls_basic.c25
-rw-r--r--net/sched/cls_matchall.c24
-rw-r--r--net/sched/sch_api.c3
-rw-r--r--net/switchdev/switchdev.c5
-rw-r--r--net/tipc/link.c2
-rw-r--r--net/tipc/socket.c4
-rw-r--r--net/tipc/topsrv.c3
-rw-r--r--net/tipc/trace.c4
-rw-r--r--net/tls/tls_main.c73
-rw-r--r--net/tls/tls_sw.c280
-rw-r--r--net/vmw_vsock/af_vsock.c2
-rw-r--r--net/xdp/Kconfig8
-rw-r--r--net/xdp/Makefile1
-rw-r--r--net/xdp/xdp_umem.c14
-rw-r--r--net/xdp/xsk.c36
-rw-r--r--net/xdp/xsk.h12
-rw-r--r--net/xdp/xsk_diag.c191
-rw-r--r--samples/bpf/bpf_insn.h20
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst1
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-feature.rst85
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst29
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-net.rst1
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-perf.rst1
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst1
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool.rst1
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool110
-rw-r--r--tools/bpf/bpftool/btf_dumper.c98
-rw-r--r--tools/bpf/bpftool/cfg.c9
-rw-r--r--tools/bpf/bpftool/feature.c764
-rw-r--r--tools/bpf/bpftool/main.c3
-rw-r--r--tools/bpf/bpftool/main.h4
-rw-r--r--tools/bpf/bpftool/map.c232
-rw-r--r--tools/bpf/bpftool/prog.c10
-rw-r--r--tools/include/linux/filter.h20
-rw-r--r--tools/include/uapi/linux/bpf.h2
-rw-r--r--tools/lib/bpf/Build2
-rw-r--r--tools/lib/bpf/Makefile17
-rw-r--r--tools/lib/bpf/libbpf.c46
-rw-r--r--tools/lib/bpf/libbpf.h14
-rw-r--r--tools/lib/bpf/libbpf.map7
-rw-r--r--tools/lib/bpf/libbpf_probes.c242
-rw-r--r--tools/testing/selftests/bpf/Makefile112
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.c43
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.h55
-rw-r--r--tools/testing/selftests/bpf/test_btf.c680
-rw-r--r--tools/testing/selftests/bpf/test_flow_dissector.c2
-rw-r--r--tools/testing/selftests/bpf/test_maps.c1
-rw-r--r--tools/testing/selftests/bpf/test_progs.c78
-rw-r--r--tools/testing/selftests/bpf/test_socket_cookie.c4
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c1
-rw-r--r--tools/testing/selftests/bpf/test_tcpnotify_user.c6
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c15426
-rw-r--r--tools/testing/selftests/bpf/verifier/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/verifier/and.c50
-rw-r--r--tools/testing/selftests/bpf/verifier/array_access.c219
-rw-r--r--tools/testing/selftests/bpf/verifier/basic.c23
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_call.c50
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_instr.c134
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_stack.c64
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_stx_ldx.c45
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds.c508
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds_deduction.c124
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c406
-rw-r--r--tools/testing/selftests/bpf/verifier/bpf_get_stack.c44
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c1942
-rw-r--r--tools/testing/selftests/bpf/verifier/cfg.c70
-rw-r--r--tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c72
-rw-r--r--tools/testing/selftests/bpf/verifier/cgroup_skb.c197
-rw-r--r--tools/testing/selftests/bpf/verifier/cgroup_storage.c220
-rw-r--r--tools/testing/selftests/bpf/verifier/const_or.c60
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx.c93
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_sk_msg.c180
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_skb.c1033
-rw-r--r--tools/testing/selftests/bpf/verifier/dead_code.c159
-rw-r--r--tools/testing/selftests/bpf/verifier/direct_packet_access.c633
-rw-r--r--tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c40
-rw-r--r--tools/testing/selftests/bpf/verifier/div0.c184
-rw-r--r--tools/testing/selftests/bpf/verifier/div_overflow.c104
-rw-r--r--tools/testing/selftests/bpf/verifier/helper_access_var_len.c614
-rw-r--r--tools/testing/selftests/bpf/verifier/helper_packet_access.c460
-rw-r--r--tools/testing/selftests/bpf/verifier/helper_value_access.c953
-rw-r--r--tools/testing/selftests/bpf/verifier/jit.c88
-rw-r--r--tools/testing/selftests/bpf/verifier/jmp32.c724
-rw-r--r--tools/testing/selftests/bpf/verifier/jset.c165
-rw-r--r--tools/testing/selftests/bpf/verifier/jump.c180
-rw-r--r--tools/testing/selftests/bpf/verifier/junk_insn.c45
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_abs.c286
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_dw.c36
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_imm64.c141
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_ind.c72
-rw-r--r--tools/testing/selftests/bpf/verifier/leak_ptr.c67
-rw-r--r--tools/testing/selftests/bpf/verifier/lwt.c189
-rw-r--r--tools/testing/selftests/bpf/verifier/map_in_map.c62
-rw-r--r--tools/testing/selftests/bpf/verifier/map_ptr_mixing.c100
-rw-r--r--tools/testing/selftests/bpf/verifier/map_ret_val.c65
-rw-r--r--tools/testing/selftests/bpf/verifier/masking.c322
-rw-r--r--tools/testing/selftests/bpf/verifier/meta_access.c235
-rw-r--r--tools/testing/selftests/bpf/verifier/perf_event_sample_period.c59
-rw-r--r--tools/testing/selftests/bpf/verifier/prevent_map_lookup.c74
-rw-r--r--tools/testing/selftests/bpf/verifier/raw_stack.c305
-rw-r--r--tools/testing/selftests/bpf/verifier/ref_tracking.c607
-rw-r--r--tools/testing/selftests/bpf/verifier/runtime_jit.c80
-rw-r--r--tools/testing/selftests/bpf/verifier/search_pruning.c156
-rw-r--r--tools/testing/selftests/bpf/verifier/spill_fill.c75
-rw-r--r--tools/testing/selftests/bpf/verifier/stack_ptr.c317
-rw-r--r--tools/testing/selftests/bpf/verifier/uninit.c39
-rw-r--r--tools/testing/selftests/bpf/verifier/unpriv.c521
-rw-r--r--tools/testing/selftests/bpf/verifier/value.c104
-rw-r--r--tools/testing/selftests/bpf/verifier/value_adj_spill.c43
-rw-r--r--tools/testing/selftests/bpf/verifier/value_illegal_alu.c94
-rw-r--r--tools/testing/selftests/bpf/verifier/value_or_null.c152
-rw-r--r--tools/testing/selftests/bpf/verifier/value_ptr_arith.c792
-rw-r--r--tools/testing/selftests/bpf/verifier/var_off.c66
-rw-r--r--tools/testing/selftests/bpf/verifier/xadd.c97
-rw-r--r--tools/testing/selftests/bpf/verifier/xdp.c14
-rw-r--r--tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c900
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto.sh126
-rw-r--r--tools/testing/selftests/net/forwarding/devlink_lib.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_flat_gre.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_flat_gre_key.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_flat_gre_keys.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_hier_gre.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_hier_gre_key.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_hier_gre_keys.sh63
-rw-r--r--tools/testing/selftests/net/forwarding/ipip_lib.sh349
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_asymmetric.sh567
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_symmetric.sh551
-rw-r--r--tools/testing/selftests/net/ip_defrag.c69
-rwxr-xr-xtools/testing/selftests/net/ip_defrag.sh16
-rw-r--r--tools/testing/selftests/net/tls.c12
594 files changed, 39334 insertions, 23304 deletions
diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst
new file mode 100644
index 000000000000..1d434c3a268d
--- /dev/null
+++ b/Documentation/bpf/btf.rst
@@ -0,0 +1,870 @@
+=====================
+BPF Type Format (BTF)
+=====================
+
+1. Introduction
+***************
+
+BTF (BPF Type Format) is the metadata format which
+encodes the debug info related to BPF programs/maps.
+The name BTF was used initially to describe
+data types. BTF was later extended to include
+function info for defined subroutines, and line info
+for source/line information.
+
+The debug info is used for map pretty printing, function
+signatures, etc. The function signatures enable better
+kernel symbols for bpf programs/functions.
+The line info helps generate
+source-annotated translated byte code, jited code
+and verifier log.
+
+The BTF specification contains two parts:
+ * BTF kernel API
+ * BTF ELF file format
+
+The kernel API is the contract between
+user space and the kernel. The kernel verifies
+the BTF info before using it.
+The ELF file format is a user space contract
+between the ELF file and the libbpf loader.
+
+The type and string sections are part of the
+BTF kernel API, describing the debug info
+(mostly type related) referenced by the bpf program.
+These two sections are discussed in
+detail in :ref:`BTF_Type_String`.
+
+.. _BTF_Type_String:
+
+2. BTF Type and String Encoding
+*******************************
+
+The file ``include/uapi/linux/btf.h`` provides a high-level
+definition of how types/strings are encoded.
+
+The beginning of the data blob must be::
+
+ struct btf_header {
+ __u16 magic;
+ __u8 version;
+ __u8 flags;
+ __u32 hdr_len;
+
+ /* All offsets are in bytes relative to the end of this header */
+ __u32 type_off; /* offset of type section */
+ __u32 type_len; /* length of type section */
+ __u32 str_off; /* offset of string section */
+ __u32 str_len; /* length of string section */
+ };
+
+The magic is ``0xeB9F``, which has a different encoding on big- and
+little-endian systems, and can be used to test whether BTF is generated
+for a big- or little-endian target.
+The ``btf_header`` is designed to be extensible, with ``hdr_len`` equal to
+``sizeof(struct btf_header)`` when the data blob is generated.
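+
+As an illustration (ours, not part of the kernel sources), a user space
+loader could sanity check a blob's header like below; the helper name is
+made up for the example::
+
+ #include <linux/btf.h>
+ #include <string.h>
+
+ /* Return 0 if the blob plausibly starts with a BTF header. */
+ static int btf_check_header(const void *data, unsigned long size)
+ {
+     struct btf_header hdr;
+
+     if (size < sizeof(hdr))
+         return -1;
+     memcpy(&hdr, data, sizeof(hdr));
+     if (hdr.magic != 0xeB9F)        /* not BTF, or wrong endianness */
+         return -1;
+     if (hdr.hdr_len < sizeof(hdr))  /* see the note on extensibility above */
+         return -1;
+     return 0;
+ }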
+
+2.1 String Encoding
+===================
+
+The first string in the string section must be a null string.
+The rest of the string table is a concatenation of other null-terminated
+strings.
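+
+For example, a string section holding the two names ``int`` and ``a``
+would be laid out as below; the offsets on the left are the values a
+``name_off`` would carry::
+
+ offset 0: '\0'              /* the mandatory null string */
+ offset 1: 'i' 'n' 't' '\0'  /* name_off = 1 refers to "int" */
+ offset 5: 'a' '\0'          /* name_off = 5 refers to "a" */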
+
+2.2 Type Encoding
+=================
+
+The type id ``0`` is reserved for the ``void`` type.
+The type section is parsed sequentially and a type id is assigned to
+each recognized type starting from id ``1``.
+Currently, the following types are supported::
+
+ #define BTF_KIND_INT 1 /* Integer */
+ #define BTF_KIND_PTR 2 /* Pointer */
+ #define BTF_KIND_ARRAY 3 /* Array */
+ #define BTF_KIND_STRUCT 4 /* Struct */
+ #define BTF_KIND_UNION 5 /* Union */
+ #define BTF_KIND_ENUM 6 /* Enumeration */
+ #define BTF_KIND_FWD 7 /* Forward */
+ #define BTF_KIND_TYPEDEF 8 /* Typedef */
+ #define BTF_KIND_VOLATILE 9 /* Volatile */
+ #define BTF_KIND_CONST 10 /* Const */
+ #define BTF_KIND_RESTRICT 11 /* Restrict */
+ #define BTF_KIND_FUNC 12 /* Function */
+ #define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
+
+Note that the type section encodes debug info, not just pure types.
+``BTF_KIND_FUNC`` is not a type; it represents a defined subprogram.
+
+Each type contains the following common data::
+
+ struct btf_type {
+ __u32 name_off;
+ /* "info" bits arrangement
+ * bits 0-15: vlen (e.g. # of struct's members)
+ * bits 16-23: unused
+ * bits 24-27: kind (e.g. int, ptr, array...etc)
+ * bits 28-30: unused
+ * bit 31: kind_flag, currently used by
+ * struct, union and fwd
+ */
+ __u32 info;
+ /* "size" is used by INT, ENUM, STRUCT and UNION.
+ * "size" tells the size of the type it is describing.
+ *
+ * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
+ * FUNC and FUNC_PROTO.
+ * "type" is a type_id referring to another type.
+ */
+ union {
+ __u32 size;
+ __u32 type;
+ };
+ };
+
+For certain kinds, the common data are followed by kind-specific data.
+The ``name_off`` in ``struct btf_type`` specifies the offset in the string table.
+The following sections detail the encoding of each kind.
+
+2.2.1 BTF_KIND_INT
+~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: any valid offset
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_INT
+ * ``info.vlen``: 0
+ * ``size``: the size of the int type in bytes.
+
+``btf_type`` is followed by a ``u32`` with the following bit arrangement::
+
+ #define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24)
+ #define BTF_INT_OFFSET(VAL) (((VAL) & 0x00ff0000) >> 16)
+ #define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff)
+
+The ``BTF_INT_ENCODING`` has the following attributes::
+
+ #define BTF_INT_SIGNED (1 << 0)
+ #define BTF_INT_CHAR (1 << 1)
+ #define BTF_INT_BOOL (1 << 2)
+
+The ``BTF_INT_ENCODING()`` provides extra information: signedness,
+char, or bool, for the int type. The char and bool encodings
+are mostly useful for pretty printing. At most one encoding can
+be specified for the int type.
+
+The ``BTF_INT_BITS()`` specifies the number of actual bits held by
+this int type. For example, a 4-bit bitfield encodes
+``BTF_INT_BITS()`` equal to 4. ``btf_type.size * 8``
+must be equal to or greater than ``BTF_INT_BITS()`` for the type.
+The maximum value of ``BTF_INT_BITS()`` is 128.
+
+The ``BTF_INT_OFFSET()`` specifies the starting bit offset to
+calculate values for this int. For example, a bitfield struct
+member has
+
+ * btf member bit offset 100 from the start of the structure,
+ * btf member pointing to an int type,
+ * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
+
+Then in the struct memory layout, this member will occupy
+``4`` bits starting from bits ``100 + 2 = 102``.
+
+Alternatively, the bitfield struct member can be the following to
+access the same bits as the above:
+
+ * btf member bit offset 102,
+ * btf member pointing to an int type,
+ * the int type has ``BTF_INT_OFFSET() = 0`` and ``BTF_INT_BITS() = 4``
+
+The original intention of ``BTF_INT_OFFSET()`` is to provide
+flexibility for bitfield encoding.
+Currently, both llvm and pahole generate ``BTF_INT_OFFSET() = 0``
+for all int types.
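+
+As a sketch (ours, not kernel code) of how the macros above apply, the
+extra ``u32`` can be decoded like::
+
+ #include <stdio.h>
+ #include <linux/btf.h>
+
+ /* int_data is the u32 immediately following struct btf_type */
+ static void btf_int_print(__u32 int_data)
+ {
+     __u8 enc = BTF_INT_ENCODING(int_data);
+
+     printf("bits=%u offset=%u%s%s%s\n",
+            BTF_INT_BITS(int_data), BTF_INT_OFFSET(int_data),
+            (enc & BTF_INT_SIGNED) ? " signed" : "",
+            (enc & BTF_INT_CHAR) ? " char" : "",
+            (enc & BTF_INT_BOOL) ? " bool" : "");
+ }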
+
+2.2.2 BTF_KIND_PTR
+~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_PTR
+ * ``info.vlen``: 0
+ * ``type``: the pointee type of the pointer
+
+No additional type data follow ``btf_type``.
+
+2.2.3 BTF_KIND_ARRAY
+~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_ARRAY
+ * ``info.vlen``: 0
+ * ``size/type``: 0, not used
+
+``btf_type`` is followed by one ``struct btf_array``::
+
+ struct btf_array {
+ __u32 type;
+ __u32 index_type;
+ __u32 nelems;
+ };
+
+The ``struct btf_array`` encoding:
+ * ``type``: the element type
+ * ``index_type``: the index type
+ * ``nelems``: the number of elements for this array (``0`` is also allowed).
+
+The ``index_type`` can be any regular int type
+(u8, u16, u32, u64, unsigned __int128).
+The original design of including ``index_type`` follows dwarf,
+which has an ``index_type`` for its array type.
+Currently in BTF, beyond type verification, the ``index_type`` is not used.
+
+The ``struct btf_array`` allows chaining through the element type to
+represent multidimensional arrays. For example, for ``int a[5][6]``, the
+following type chain illustrates it:
+
+ * [1]: int
+ * [2]: array, ``btf_array.type = [1]``, ``btf_array.nelems = 6``
+ * [3]: array, ``btf_array.type = [2]``, ``btf_array.nelems = 5``
+
+Currently, both pahole and llvm collapse a multidimensional array
+into a one-dimensional array, e.g., for ``a[5][6]``, ``btf_array.nelems``
+equals ``30``. This is because the original use case is map pretty
+printing, where the whole array is dumped out, so a one-dimensional array
+is enough. As more BTF usage is explored, pahole and llvm can be
+changed to generate a proper chained representation for
+multidimensional arrays.
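+
+As a hand-written sketch of the chained representation above (in practice
+the encoding is emitted by pahole/llvm; the type ids and layout are only
+illustrative)::
+
+ #include <linux/btf.h>
+
+ /* kind lives in bits 24-27 of btf_type.info; ids follow the list above */
+ struct btf_array_type {
+     struct btf_type t;
+     struct btf_array a;
+ };
+
+ struct btf_array_type dim6 = {   /* type id [2]: int[6] */
+     .t = { .info = BTF_KIND_ARRAY << 24 },
+     .a = { .type = 1 /* int */, .index_type = 1, .nelems = 6 },
+ };
+ struct btf_array_type dim5x6 = { /* type id [3]: int[5][6] */
+     .t = { .info = BTF_KIND_ARRAY << 24 },
+     .a = { .type = 2 /* [2] */, .index_type = 1, .nelems = 5 },
+ };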
+
+2.2.4 BTF_KIND_STRUCT
+~~~~~~~~~~~~~~~~~~~~~
+2.2.5 BTF_KIND_UNION
+~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0 or offset to a valid C identifier
+ * ``info.kind_flag``: 0 or 1
+ * ``info.kind``: BTF_KIND_STRUCT or BTF_KIND_UNION
+ * ``info.vlen``: the number of struct/union members
+ * ``size``: the size of the struct/union in bytes
+
+``btf_type`` is followed by ``info.vlen`` number of ``struct btf_member``.::
+
+ struct btf_member {
+ __u32 name_off;
+ __u32 type;
+ __u32 offset;
+ };
+
+``struct btf_member`` encoding:
+ * ``name_off``: offset to a valid C identifier
+ * ``type``: the member type
+ * ``offset``: <see below>
+
+If the type info ``kind_flag`` is not set, the offset contains
+only the bit offset of the member. Note that the base type of a
+bitfield can only be an int or enum type. If the bitfield size
+is 32, the base type can be either an int or enum type.
+If the bitfield size is not 32, the base type must be int,
+and the int type's ``BTF_INT_BITS()`` encodes the bitfield size.
+
+If the ``kind_flag`` is set, the ``btf_member.offset``
+contains both member bitfield size and bit offset. The
+bitfield size and bit offset are calculated as below.::
+
+ #define BTF_MEMBER_BITFIELD_SIZE(val) ((val) >> 24)
+ #define BTF_MEMBER_BIT_OFFSET(val) ((val) & 0xffffff)
+
+In this case, if the base type is an int type, it must
+be a regular int type:
+
+ * ``BTF_INT_OFFSET()`` must be 0.
+ * ``BTF_INT_BITS()`` must be equal to ``{1,2,4,8,16} * 8``.
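+
+A sketch (ours) of decoding a member's ``offset`` under both modes, using
+the bit layout documented above::
+
+ __u32 bit_offset, bitfield_size = 0;
+
+ if (t->info >> 31) {    /* kind_flag of the struct/union type */
+     bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
+     bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
+ } else {
+     bit_offset = m->offset;
+     /* any bitfield size comes from the int type's BTF_INT_BITS() */
+ }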
+
+The following kernel patch introduced ``kind_flag`` and
+explained why both modes exist:
+
+ https://github.com/torvalds/linux/commit/9d5f9f701b1891466fb3dbb1806ad97716f95cc3#diff-fa650a64fdd3968396883d2fe8215ff3
+
+2.2.6 BTF_KIND_ENUM
+~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0 or offset to a valid C identifier
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_ENUM
+ * ``info.vlen``: number of enum values
+ * ``size``: 4
+
+``btf_type`` is followed by ``info.vlen`` number of ``struct btf_enum``.::
+
+ struct btf_enum {
+ __u32 name_off;
+ __s32 val;
+ };
+
+The ``btf_enum`` encoding:
+ * ``name_off``: offset to a valid C identifier
+ * ``val``: any value
+
+2.2.7 BTF_KIND_FWD
+~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: offset to a valid C identifier
+ * ``info.kind_flag``: 0 for struct, 1 for union
+ * ``info.kind``: BTF_KIND_FWD
+ * ``info.vlen``: 0
+ * ``type``: 0
+
+No additional type data follow ``btf_type``.
+
+2.2.8 BTF_KIND_TYPEDEF
+~~~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: offset to a valid C identifier
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_TYPEDEF
+ * ``info.vlen``: 0
+ * ``type``: the type which can be referred by name at ``name_off``
+
+No additional type data follow ``btf_type``.
+
+2.2.9 BTF_KIND_VOLATILE
+~~~~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_VOLATILE
+ * ``info.vlen``: 0
+ * ``type``: the type with ``volatile`` qualifier
+
+No additional type data follow ``btf_type``.
+
+2.2.10 BTF_KIND_CONST
+~~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_CONST
+ * ``info.vlen``: 0
+ * ``type``: the type with ``const`` qualifier
+
+No additional type data follow ``btf_type``.
+
+2.2.11 BTF_KIND_RESTRICT
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_RESTRICT
+ * ``info.vlen``: 0
+ * ``type``: the type with ``restrict`` qualifier
+
+No additional type data follow ``btf_type``.
+
+2.2.12 BTF_KIND_FUNC
+~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: offset to a valid C identifier
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_FUNC
+ * ``info.vlen``: 0
+ * ``type``: a BTF_KIND_FUNC_PROTO type
+
+No additional type data follow ``btf_type``.
+
+A BTF_KIND_FUNC defines, not a type, but a subprogram (function) whose
+signature is defined by ``type``. The subprogram is thus an instance of
+that type. The BTF_KIND_FUNC may in turn be referenced by a func_info in
+the :ref:`BTF_Ext_Section` (ELF) or in the arguments to
+:ref:`BPF_Prog_Load` (ABI).
+
+2.2.13 BTF_KIND_FUNC_PROTO
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_FUNC_PROTO
+ * ``info.vlen``: # of parameters
+ * ``type``: the return type
+
+``btf_type`` is followed by ``info.vlen`` number of ``struct btf_param``.::
+
+ struct btf_param {
+ __u32 name_off;
+ __u32 type;
+ };
+
+If a BTF_KIND_FUNC_PROTO type is referred to by a BTF_KIND_FUNC type,
+then ``btf_param.name_off`` must point to a valid C identifier,
+except for the possible last argument representing the variable
+argument. The ``btf_param.type`` refers to the parameter type.
+
+If the function has variable arguments, the last parameter
+is encoded with ``name_off = 0`` and ``type = 0``.
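+
+For illustration, a prototype such as ``int log2arg(const char *fmt, ...)``
+would carry ``vlen = 2`` and the two ``btf_param`` entries below; the id
+and string offset values are made up for the example::
+
+ struct btf_param params[2] = {
+     { .name_off = 7 /* "fmt" */, .type = 4 /* const char * */ },
+     { .name_off = 0, .type = 0 },  /* the trailing variable argument */
+ };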
+
+3. BTF Kernel API
+*****************
+
+The following bpf syscall commands involve BTF:
+ * BPF_BTF_LOAD: load a blob of BTF data into the kernel
+ * BPF_MAP_CREATE: map creation with btf key and value type info.
+ * BPF_PROG_LOAD: prog load with btf function and line info.
+ * BPF_BTF_GET_FD_BY_ID: get a btf fd
+ * BPF_OBJ_GET_INFO_BY_FD: btf, func_info, line_info
+ and other btf related info are returned.
+
+The workflow typically looks like:
+::
+
+ Application:
+ BPF_BTF_LOAD
+ |
+ v
+ BPF_MAP_CREATE and BPF_PROG_LOAD
+ |
+ V
+ ......
+
+ Introspection tool:
+ ......
+ BPF_{PROG,MAP}_GET_NEXT_ID (get prog/map id's)
+ |
+ V
+ BPF_{PROG,MAP}_GET_FD_BY_ID (get a prog/map fd)
+ |
+ V
+ BPF_OBJ_GET_INFO_BY_FD (get bpf_prog_info/bpf_map_info with btf_id)
+ | |
+ V |
+ BPF_BTF_GET_FD_BY_ID (get btf_fd) |
+ | |
+ V |
+ BPF_OBJ_GET_INFO_BY_FD (get btf) |
+ | |
+ V V
+ pretty print types, dump func signatures and line info, etc.
+
+
+3.1 BPF_BTF_LOAD
+================
+
+Load a blob of BTF data into the kernel. A blob of data,
+as described in :ref:`BTF_Type_String`,
+can be directly loaded into the kernel.
+A ``btf_fd`` is returned to user space.
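+
+A minimal user space sketch of the load (ours; error handling elided)::
+
+ #include <unistd.h>
+ #include <sys/syscall.h>
+ #include <linux/bpf.h>
+
+ static int btf_load(const void *data, __u32 size)
+ {
+     union bpf_attr attr = {};
+
+     attr.btf = (__u64)(unsigned long)data;
+     attr.btf_size = size;
+     /* attr.btf_log_buf/btf_log_size/btf_log_level can be set to
+      * capture the kernel's verification errors.
+      */
+     return syscall(__NR_bpf, BPF_BTF_LOAD, &attr, sizeof(attr));
+ }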
+
+3.2 BPF_MAP_CREATE
+==================
+
+A map can be created with ``btf_fd`` and specified key/value type id.::
+
+ __u32 btf_fd; /* fd pointing to a BTF type data */
+ __u32 btf_key_type_id; /* BTF type_id of the key */
+ __u32 btf_value_type_id; /* BTF type_id of the value */
+
+In libbpf, the map can be defined with extra annotation like below:
+::
+
+ struct bpf_map_def SEC("maps") btf_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct ipv_counts),
+ .max_entries = 4,
+ };
+ BPF_ANNOTATE_KV_PAIR(btf_map, int, struct ipv_counts);
+
+Here, the parameters for the macro BPF_ANNOTATE_KV_PAIR are the map name
+and the key and value types for the map.
+During ELF parsing, libbpf is able to extract the key/value type_id's
+and assign them to BPF_MAP_CREATE attributes automatically.
+
+.. _BPF_Prog_Load:
+
+3.3 BPF_PROG_LOAD
+=================
+
+During prog_load, func_info and line_info can be passed to the kernel with
+proper values for the following attributes:
+::
+
+ __u32 insn_cnt;
+ __aligned_u64 insns;
+ ......
+ __u32 prog_btf_fd; /* fd pointing to BTF type data */
+ __u32 func_info_rec_size; /* userspace bpf_func_info size */
+ __aligned_u64 func_info; /* func info */
+ __u32 func_info_cnt; /* number of bpf_func_info records */
+ __u32 line_info_rec_size; /* userspace bpf_line_info size */
+ __aligned_u64 line_info; /* line info */
+ __u32 line_info_cnt; /* number of bpf_line_info records */
+
+The func_info and line_info are arrays of the records below, respectively.::
+
+ struct bpf_func_info {
+ __u32 insn_off; /* [0, insn_cnt - 1] */
+ __u32 type_id; /* pointing to a BTF_KIND_FUNC type */
+ };
+ struct bpf_line_info {
+ __u32 insn_off; /* [0, insn_cnt - 1] */
+ __u32 file_name_off; /* offset to string table for the filename */
+ __u32 line_off; /* offset to string table for the source line */
+ __u32 line_col; /* line number and column number */
+ };
+
+func_info_rec_size is the size of each func_info record, and line_info_rec_size
+is the size of each line_info record. Passing the record size to the kernel
+makes it possible to extend the record itself in the future.
+
+Below are requirements for func_info:
+ * func_info[0].insn_off must be 0.
+ * the func_info insn_off is in strictly increasing order and matches
+ bpf func boundaries.
+
+Below are requirements for line_info:
+ * the first insn in each func must point to a line_info record.
+ * the line_info insn_off is in strictly increasing order.
+
+For line_info, the line number and column number are defined as below:
+::
+
+ #define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10)
+ #define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff)
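+
+For example, line 7 column 14 is encoded as ``(7 << 10) | 14 = 7182``,
+which is the value shown as ``# Line 7 Col 14`` in the assembly dump in
+section 6.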
+
+3.4 BPF_{PROG,MAP}_GET_NEXT_ID
+==============================
+
+In the kernel, every loaded program, map or btf has a unique id.
+The id won't change during the lifetime of the program, map or btf.
+
+The bpf syscall command BPF_{PROG,MAP}_GET_NEXT_ID
+returns all ids, one per call, to user space, for bpf
+programs or maps respectively,
+so an inspection tool can inspect all programs and maps.
+
+3.5 BPF_{PROG,MAP}_GET_FD_BY_ID
+===============================
+
+The introspection tool cannot use an id to get details about programs or maps.
+A file descriptor needs to be obtained first, for reference-counting purposes.
+
+3.6 BPF_OBJ_GET_INFO_BY_FD
+==========================
+
+Once a program/map fd is acquired, the introspection tool can
+get the detailed information from the kernel about this fd,
+some of which is btf related. For example,
+``bpf_map_info`` returns ``btf_id`` and the key/value type ids.
+``bpf_prog_info`` returns ``btf_id``, func_info and line info
+for translated bpf byte codes, and jited_line_info.
+
+3.7 BPF_BTF_GET_FD_BY_ID
+========================
+
+With ``btf_id`` obtained in ``bpf_map_info`` and ``bpf_prog_info``,
+bpf syscall command BPF_BTF_GET_FD_BY_ID can retrieve a btf fd.
+Then, with command BPF_OBJ_GET_INFO_BY_FD, the btf blob, originally
+loaded into the kernel with BPF_BTF_LOAD, can be retrieved.
+
+With the btf blob, ``bpf_map_info`` and ``bpf_prog_info``, the introspection
+tool has full btf knowledge and is able to pretty print map key/values,
+dump func signatures, dump line info along with byte/jit codes.
+
+4. ELF File Format Interface
+****************************
+
+4.1 .BTF section
+================
+
+The .BTF section contains type and string data. The format of this section
+is the same as the one described in :ref:`BTF_Type_String`.
+
+.. _BTF_Ext_Section:
+
+4.2 .BTF.ext section
+====================
+
+The .BTF.ext section encodes func_info and line_info, which
+need loader manipulation before being loaded into the kernel.
+
+The specification for .BTF.ext section is defined at
+``tools/lib/bpf/btf.h`` and ``tools/lib/bpf/btf.c``.
+
+The current header of .BTF.ext section::
+
+ struct btf_ext_header {
+ __u16 magic;
+ __u8 version;
+ __u8 flags;
+ __u32 hdr_len;
+
+ /* All offsets are in bytes relative to the end of this header */
+ __u32 func_info_off;
+ __u32 func_info_len;
+ __u32 line_info_off;
+ __u32 line_info_len;
+ };
+
+It is very similar to the .BTF section. Instead of type/string sections,
+it contains func_info and line_info sections. See :ref:`BPF_Prog_Load`
+for details about the func_info and line_info record formats.
+
+The func_info is organized as below.::
+
+ func_info_rec_size
+ btf_ext_info_sec for section #1 /* func_info for section #1 */
+ btf_ext_info_sec for section #2 /* func_info for section #2 */
+ ...
+
+``func_info_rec_size`` specifies the size of the ``bpf_func_info`` structure
+when .BTF.ext is generated. ``btf_ext_info_sec``, defined below, is
+the func_info for each specific ELF section.::
+
+ struct btf_ext_info_sec {
+ __u32 sec_name_off; /* offset to section name */
+ __u32 num_info;
+ /* Followed by num_info * record_size number of bytes */
+ __u8 data[0];
+ };
+
+Here, num_info must be greater than 0.
+
+The line_info is organized as below.::
+
+ line_info_rec_size
+ btf_ext_info_sec for section #1 /* line_info for section #1 */
+ btf_ext_info_sec for section #2 /* line_info for section #2 */
+ ...
+
+``line_info_rec_size`` specifies the size of the ``bpf_line_info`` structure
+when .BTF.ext is generated.
+
+The interpretation of ``bpf_func_info->insn_off`` and
+``bpf_line_info->insn_off`` is different between the kernel API and the
+ELF API. For the kernel API, the ``insn_off`` is the instruction offset
+in units of ``struct bpf_insn``. For the ELF API, the ``insn_off`` is
+the byte offset from the beginning of the section
+(``btf_ext_info_sec->sec_name_off``).
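+
+Since ``sizeof(struct bpf_insn)`` is 8 bytes, a loader converting a
+.BTF.ext record for the kernel API can do, as a sketch::
+
+ /* ELF API: insn_off is a byte offset within the ELF section;
+  * kernel API: insn_off is an instruction index.
+  */
+ kernel_insn_off = elf_insn_off / sizeof(struct bpf_insn);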
+
+5. Using BTF
+************
+
+5.1 bpftool map pretty print
+============================
+
+With BTF, the map key/value can be printed based on fields rather than
+simply raw bytes. This is especially
+valuable for large structures or if your data structure
+has bitfields. For example, for the following map,::
+
+ enum A { A1, A2, A3, A4, A5 };
+ typedef enum A ___A;
+ struct tmp_t {
+ char a1:4;
+ int a2:4;
+ int :4;
+ __u32 a3:4;
+ int b;
+ ___A b1:4;
+ enum A b2:4;
+ };
+ struct bpf_map_def SEC("maps") tmpmap = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct tmp_t),
+ .max_entries = 1,
+ };
+ BPF_ANNOTATE_KV_PAIR(tmpmap, int, struct tmp_t);
+
+bpftool is able to pretty print it like below:
+::
+
+ [{
+ "key": 0,
+ "value": {
+ "a1": 0x2,
+ "a2": 0x4,
+ "a3": 0x6,
+ "b": 7,
+ "b1": 0x8,
+ "b2": 0xa
+ }
+ }
+ ]
+
+5.2 bpftool prog dump
+=====================
+
+The following is an example showing how func_info and line_info
+can help prog dump with better kernel symbol names, function prototypes
+and line information.::
+
+ $ bpftool prog dump jited pinned /sys/fs/bpf/test_btf_haskv
+ [...]
+ int test_long_fname_2(struct dummy_tracepoint_args * arg):
+ bpf_prog_44a040bf25481309_test_long_fname_2:
+ ; static int test_long_fname_2(struct dummy_tracepoint_args *arg)
+ 0: push %rbp
+ 1: mov %rsp,%rbp
+ 4: sub $0x30,%rsp
+ b: sub $0x28,%rbp
+ f: mov %rbx,0x0(%rbp)
+ 13: mov %r13,0x8(%rbp)
+ 17: mov %r14,0x10(%rbp)
+ 1b: mov %r15,0x18(%rbp)
+ 1f: xor %eax,%eax
+ 21: mov %rax,0x20(%rbp)
+ 25: xor %esi,%esi
+ ; int key = 0;
+ 27: mov %esi,-0x4(%rbp)
+ ; if (!arg->sock)
+ 2a: mov 0x8(%rdi),%rdi
+ ; if (!arg->sock)
+ 2e: cmp $0x0,%rdi
+ 32: je 0x0000000000000070
+ 34: mov %rbp,%rsi
+ ; counts = bpf_map_lookup_elem(&btf_map, &key);
+ [...]
+
+5.3 verifier log
+================
+
+The following is an example of how line_info can help debug verifier failures.::
+
+ /* The code at tools/testing/selftests/bpf/test_xdp_noinline.c
+ * is modified as below.
+ */
+ data = (void *)(long)xdp->data;
+ data_end = (void *)(long)xdp->data_end;
+ /*
+ if (data + 4 > data_end)
+ return XDP_DROP;
+ */
+ *(u32 *)data = dst->dst;
+
+ $ bpftool prog load ./test_xdp_noinline.o /sys/fs/bpf/test_xdp_noinline type xdp
+ ; data = (void *)(long)xdp->data;
+ 224: (79) r2 = *(u64 *)(r10 -112)
+ 225: (61) r2 = *(u32 *)(r2 +0)
+ ; *(u32 *)data = dst->dst;
+ 226: (63) *(u32 *)(r2 +0) = r1
+ invalid access to packet, off=0 size=4, R2(id=0,off=0,r=0)
+ R2 offset is outside of the packet
+
+6. BTF Generation
+*****************
+
+You need the latest pahole
+
+ https://git.kernel.org/pub/scm/devel/pahole/pahole.git/
+
+or llvm (8.0 or later). pahole acts as a dwarf2btf converter. It doesn't
+support .BTF.ext and the BTF_KIND_FUNC type yet. For example::
+
+ -bash-4.4$ cat t.c
+ struct t {
+ int a:2;
+ int b:3;
+ int c:2;
+ } g;
+ -bash-4.4$ gcc -c -O2 -g t.c
+ -bash-4.4$ pahole -JV t.o
+ File t.o:
+ [1] STRUCT t kind_flag=1 size=4 vlen=3
+ a type_id=2 bitfield_size=2 bits_offset=0
+ b type_id=2 bitfield_size=3 bits_offset=2
+ c type_id=2 bitfield_size=2 bits_offset=5
+ [2] INT int size=4 bit_offset=0 nr_bits=32 encoding=SIGNED
+
+llvm is able to generate .BTF and .BTF.ext directly with -g for the bpf
+target only. The assembly code (-S) is able to show the BTF encoding in
+assembly format.::
+
+ -bash-4.4$ cat t2.c
+ typedef int __int32;
+ struct t2 {
+ int a2;
+ int (*f2)(char q1, __int32 q2, ...);
+ int (*f3)();
+ } g2;
+ int main() { return 0; }
+ int test() { return 0; }
+ -bash-4.4$ clang -c -g -O2 -target bpf t2.c
+ -bash-4.4$ readelf -S t2.o
+ ......
+ [ 8] .BTF PROGBITS 0000000000000000 00000247
+ 000000000000016e 0000000000000000 0 0 1
+ [ 9] .BTF.ext PROGBITS 0000000000000000 000003b5
+ 0000000000000060 0000000000000000 0 0 1
+ [10] .rel.BTF.ext REL 0000000000000000 000007e0
+ 0000000000000040 0000000000000010 16 9 8
+ ......
+ -bash-4.4$ clang -S -g -O2 -target bpf t2.c
+ -bash-4.4$ cat t2.s
+ ......
+ .section .BTF,"",@progbits
+ .short 60319 # 0xeb9f
+ .byte 1
+ .byte 0
+ .long 24
+ .long 0
+ .long 220
+ .long 220
+ .long 122
+ .long 0 # BTF_KIND_FUNC_PROTO(id = 1)
+ .long 218103808 # 0xd000000
+ .long 2
+ .long 83 # BTF_KIND_INT(id = 2)
+ .long 16777216 # 0x1000000
+ .long 4
+ .long 16777248 # 0x1000020
+ ......
+ .byte 0 # string offset=0
+ .ascii ".text" # string offset=1
+ .byte 0
+ .ascii "/home/yhs/tmp-pahole/t2.c" # string offset=7
+ .byte 0
+ .ascii "int main() { return 0; }" # string offset=33
+ .byte 0
+ .ascii "int test() { return 0; }" # string offset=58
+ .byte 0
+ .ascii "int" # string offset=83
+ ......
+ .section .BTF.ext,"",@progbits
+ .short 60319 # 0xeb9f
+ .byte 1
+ .byte 0
+ .long 24
+ .long 0
+ .long 28
+ .long 28
+ .long 44
+ .long 8 # FuncInfo
+ .long 1 # FuncInfo section string offset=1
+ .long 2
+ .long .Lfunc_begin0
+ .long 3
+ .long .Lfunc_begin1
+ .long 5
+ .long 16 # LineInfo
+ .long 1 # LineInfo section string offset=1
+ .long 2
+ .long .Ltmp0
+ .long 7
+ .long 33
+ .long 7182 # Line 7 Col 14
+ .long .Ltmp3
+ .long 7
+ .long 58
+ .long 8206 # Line 8 Col 14
+
+7. Testing
+**********
+
+The kernel bpf selftest ``test_btf.c`` provides an extensive set of BTF-related tests.
diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst
index 00a8450a602f..4e77932959cc 100644
--- a/Documentation/bpf/index.rst
+++ b/Documentation/bpf/index.rst
@@ -15,6 +15,13 @@ that goes into great technical depth about the BPF Architecture.
The primary info for the bpf syscall is available in the `man-pages`_
for `bpf(2)`_.
+BPF Type Format (BTF)
+=====================
+
+.. toctree::
+ :maxdepth: 1
+
+ btf
Frequently asked questions (FAQ)
diff --git a/Documentation/devicetree/bindings/net/dsa/ksz.txt b/Documentation/devicetree/bindings/net/dsa/ksz.txt
index 0f407fb371ce..8d58c2a7de39 100644
--- a/Documentation/devicetree/bindings/net/dsa/ksz.txt
+++ b/Documentation/devicetree/bindings/net/dsa/ksz.txt
@@ -19,58 +19,58 @@ Examples:
Ethernet switch connected via SPI to the host, CPU port wired to eth0:
- eth0: ethernet@10001000 {
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
- };
+ eth0: ethernet@10001000 {
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
- spi1: spi@f8008000 {
- pinctrl-0 = <&pinctrl_spi_ksz>;
- cs-gpios = <&pioC 25 0>;
- id = <1>;
+ spi1: spi@f8008000 {
+ pinctrl-0 = <&pinctrl_spi_ksz>;
+ cs-gpios = <&pioC 25 0>;
+ id = <1>;
- ksz9477: ksz9477@0 {
- compatible = "microchip,ksz9477";
- reg = <0>;
+ ksz9477: ksz9477@0 {
+ compatible = "microchip,ksz9477";
+ reg = <0>;
- spi-max-frequency = <44000000>;
- spi-cpha;
- spi-cpol;
+ spi-max-frequency = <44000000>;
+ spi-cpha;
+ spi-cpol;
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
- label = "lan1";
- };
- port@1 {
- reg = <1>;
- label = "lan2";
- };
- port@2 {
- reg = <2>;
- label = "lan3";
- };
- port@3 {
- reg = <3>;
- label = "lan4";
- };
- port@4 {
- reg = <4>;
- label = "lan5";
- };
- port@5 {
- reg = <5>;
- label = "cpu";
- ethernet = <&eth0>;
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
- };
- };
- };
- };
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ label = "lan1";
+ };
+ port@1 {
+ reg = <1>;
+ label = "lan2";
+ };
+ port@2 {
+ reg = <2>;
+ label = "lan3";
+ };
+ port@3 {
+ reg = <3>;
+ label = "lan4";
+ };
+ port@4 {
+ reg = <4>;
+ label = "lan5";
+ };
+ port@5 {
+ reg = <5>;
+ label = "cpu";
+ ethernet = <&eth0>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/qcom,ethqos.txt b/Documentation/devicetree/bindings/net/qcom,ethqos.txt
new file mode 100644
index 000000000000..fcf5035810b5
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qcom,ethqos.txt
@@ -0,0 +1,64 @@
+Qualcomm Ethernet ETHQOS device
+
+This documents the dwmac-based Ethernet device which supports Gigabit
+Ethernet from version v2.3.0 onwards.
+
+This device has the following properties:
+
+Required properties:
+
+- compatible: Should be "qcom,qcs404-ethqos"
+
+- reg: Address and length of the register set for the device
+
+- reg-names: Should contain register names "stmmaceth", "rgmii"
+
+- clocks: Should contain phandles to the required clocks
+
+- clock-names: Should contain clock names "stmmaceth", "pclk",
+ "ptp_ref", "rgmii"
+
+- interrupts: Should contain the interrupt specifiers
+
+- interrupt-names: Should contain interrupt names "macirq", "eth_lpi"
+
+The rest of the properties are defined in the stmmac.txt file in the same directory
+
+
+Example:
+
+ethernet: ethernet@7a80000 {
+ compatible = "qcom,qcs404-ethqos";
+ reg = <0x07a80000 0x10000>,
+ <0x07a96000 0x100>;
+ reg-names = "stmmaceth", "rgmii";
+ clock-names = "stmmaceth", "pclk", "ptp_ref", "rgmii";
+ clocks = <&gcc GCC_ETH_AXI_CLK>,
+ <&gcc GCC_ETH_SLAVE_AHB_CLK>,
+ <&gcc GCC_ETH_PTP_CLK>,
+ <&gcc GCC_ETH_RGMII_CLK>;
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_lpi";
+ snps,reset-gpio = <&tlmm 60 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+
+ snps,txpbl = <8>;
+ snps,rxpbl = <2>;
+ snps,aal;
+ snps,tso;
+
+ phy-handle = <&phy1>;
+ phy-mode = "rgmii";
+
+ mdio {
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ compatible = "snps,dwmac-mdio";
+ phy1: phy@4 {
+ device_type = "ethernet-phy";
+ reg = <0x4>;
+ };
+ };
+
+};
diff --git a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
index c5d0e7998e2b..8e7f8551d190 100644
--- a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
+++ b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
@@ -17,6 +17,8 @@ Clock Properties:
- fsl,tmr-fiper1 Fixed interval period pulse generator.
- fsl,tmr-fiper2 Fixed interval period pulse generator.
- fsl,max-adj Maximum frequency adjustment in parts per billion.
+ - fsl,extts-fifo The presence of this property indicates hardware
+ support for the external trigger stamp FIFO.
These properties set the operational parameters for the PTP
clock. You must choose these carefully for the clock to work right.
diff --git a/Documentation/networking/devlink-params-mlxsw.txt b/Documentation/networking/devlink-params-mlxsw.txt
new file mode 100644
index 000000000000..2c5c67a920c9
--- /dev/null
+++ b/Documentation/networking/devlink-params-mlxsw.txt
@@ -0,0 +1,2 @@
+fw_load_policy [DEVICE, GENERIC]
+ Configuration mode: driverinit
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index 25170ad7d25b..1000b821681c 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -236,19 +236,6 @@ description.
Design limitations
==================
-DSA is a platform device driver
--------------------------------
-
-DSA is implemented as a DSA platform device driver which is convenient because
-it will register the entire DSA switch tree attached to a master network device
-in one-shot, facilitating the device creation and simplifying the device driver
-model a bit, this comes however with a number of limitations:
-
-- building DSA and its switch drivers as modules is currently not working
-- the device driver parenting does not necessarily reflect the original
- bus/device the switch can be created from
-- supporting non-MDIO and non-MMIO (platform) switches is not possible
-
Limits on the number of devices and ports
-----------------------------------------
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index 2196b824e96c..01603bc2eff1 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -865,7 +865,7 @@ Three LSB bits store instruction class which is one of:
BPF_STX 0x03 BPF_STX 0x03
BPF_ALU 0x04 BPF_ALU 0x04
BPF_JMP 0x05 BPF_JMP 0x05
- BPF_RET 0x06 [ class 6 unused, for future if needed ]
+ BPF_RET 0x06 BPF_JMP32 0x06
BPF_MISC 0x07 BPF_ALU64 0x07
When BPF_CLASS(code) == BPF_ALU or BPF_JMP, 4th bit encodes source operand ...
@@ -902,9 +902,9 @@ If BPF_CLASS(code) == BPF_ALU or BPF_ALU64 [ in eBPF ], BPF_OP(code) is one of:
BPF_ARSH 0xc0 /* eBPF only: sign extending shift right */
BPF_END 0xd0 /* eBPF only: endianness conversion */
-If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of:
+If BPF_CLASS(code) == BPF_JMP or BPF_JMP32 [ in eBPF ], BPF_OP(code) is one of:
- BPF_JA 0x00
+ BPF_JA 0x00 /* BPF_JMP only */
BPF_JEQ 0x10
BPF_JGT 0x20
BPF_JGE 0x30
@@ -912,8 +912,8 @@ If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of:
BPF_JNE 0x50 /* eBPF only: jump != */
BPF_JSGT 0x60 /* eBPF only: signed '>' */
BPF_JSGE 0x70 /* eBPF only: signed '>=' */
- BPF_CALL 0x80 /* eBPF only: function call */
- BPF_EXIT 0x90 /* eBPF only: function return */
+ BPF_CALL 0x80 /* eBPF BPF_JMP only: function call */
+ BPF_EXIT 0x90 /* eBPF BPF_JMP only: function return */
BPF_JLT 0xa0 /* eBPF only: unsigned '<' */
BPF_JLE 0xb0 /* eBPF only: unsigned '<=' */
BPF_JSLT 0xc0 /* eBPF only: signed '<' */
@@ -936,8 +936,9 @@ Classic BPF wastes the whole BPF_RET class to represent a single 'ret'
operation. Classic BPF_RET | BPF_K means copy imm32 into return register
and perform function exit. eBPF is modeled to match CPU, so BPF_JMP | BPF_EXIT
in eBPF means function exit only. The eBPF program needs to store return
-value into register R0 before doing a BPF_EXIT. Class 6 in eBPF is currently
-unused and reserved for future use.
+value into register R0 before doing a BPF_EXIT. Class 6 in eBPF is used as
+BPF_JMP32 to mean exactly the same operations as BPF_JMP, but with 32-bit wide
+operands for the comparisons instead.
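+
+As a sketch of what this buys in practice, assuming the BPF_JMP32_IMM()
+helper macro from include/linux/filter.h (added together with the JMP32
+class), a 32-bit wide conditional jump can be built like:
+
+  /* if ((u32) R1 > 7) goto pc + 2; only the low 32 bits are compared */
+  struct bpf_insn insn = BPF_JMP32_IMM(BPF_JGT, BPF_REG_1, 7, 2);
+
+The raw encoding differs from the BPF_JMP variant only in the class bits:
+BPF_JMP32 (0x06) | BPF_OP(BPF_JGT) | BPF_K instead of BPF_JMP (0x05).
+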
For load and store instructions the 8-bit 'code' field is divided as:
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 59e86de662cd..f1627ca2a0ea 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -29,6 +29,7 @@ Contents:
msg_zerocopy
failover
net_failover
+ phy
alias
bridge
snmp_counter
diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst
new file mode 100644
index 000000000000..0dd90d7df5ec
--- /dev/null
+++ b/Documentation/networking/phy.rst
@@ -0,0 +1,447 @@
+=====================
+PHY Abstraction Layer
+=====================
+
+Purpose
+=======
+
+Most network devices consist of a set of registers which provide an interface
+to a MAC layer, which communicates with the physical connection through a
+PHY. The PHY concerns itself with negotiating link parameters with the link
+partner on the other side of the network connection (typically, an ethernet
+cable), and provides a register interface to allow drivers to determine what
+settings were chosen, and to configure what settings are allowed.
+
+While these devices are distinct from the network devices, and conform to a
+standard layout for the registers, it has been common practice to integrate
+the PHY management code with the network driver. This has resulted in large
+amounts of redundant code. Also, on embedded systems with multiple (and
+sometimes quite different) ethernet controllers connected to the same
+management bus, it is difficult to ensure safe use of the bus.
+
+Since the PHYs are devices, and the management busses through which they are
+accessed are, in fact, busses, the PHY Abstraction Layer treats them as such.
+In doing so, it has these goals:
+
+#. Increase code-reuse
+#. Increase overall code-maintainability
+#. Speed development time for new network drivers, and for new systems
+
+Basically, this layer is meant to provide an interface to PHY devices which
+allows network driver writers to write as little code as possible, while
+still providing a full feature set.
+
+The MDIO bus
+============
+
+Most network devices are connected to a PHY by means of a management bus.
+Different devices use different busses (though some share common interfaces).
+In order to take advantage of the PAL, each bus interface needs to be
+registered as a distinct device.
+
+#. read and write functions must be implemented. Their prototypes are::
+
+ int write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
+ int read(struct mii_bus *bus, int mii_id, int regnum);
+
+ mii_id is the address on the bus for the PHY, and regnum is the register
+ number. These functions are guaranteed not to be called from interrupt
+ time, so it is safe for them to block, waiting for an interrupt to signal
+ the operation is complete.
+
+#. A reset function is optional. This is used to return the bus to an
+ initialized state.
+
+#. A probe function is needed. This function should set up anything the bus
+ driver needs, setup the mii_bus structure, and register with the PAL using
+ mdiobus_register. Similarly, there's a remove function to undo all of
+ that (use mdiobus_unregister).
+
+#. Like any driver, the device_driver structure must be configured, and
+   init/exit functions are used to register the driver.
+
+#. The bus must also be declared somewhere as a device, and registered.
+
+As an example for how one driver implemented an mdio bus driver, see
+drivers/net/ethernet/freescale/fsl_pq_mdio.c and an associated DTS file
+for one of the users. (e.g. "git grep fsl,.*-mdio arch/powerpc/boot/dts/")
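+
+As a minimal sketch of the steps above (the "myboard" names and helpers
+are assumptions for illustration, not a real driver), an MDIO bus driver
+built on the standard mdiobus helpers might look like::
+
+  static int myboard_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+  {
+          struct myboard_mdio *priv = bus->priv;  /* set in probe below */
+
+          /* blocking is fine here: never called from interrupt context */
+          return myboard_mdio_xfer(priv, mii_id, regnum);
+  }
+
+  static int myboard_mdio_probe(struct platform_device *pdev)
+  {
+          struct mii_bus *bus = mdiobus_alloc();
+
+          if (!bus)
+                  return -ENOMEM;
+          bus->name = "myboard MDIO bus";
+          bus->read = myboard_mdio_read;
+          bus->write = myboard_mdio_write;  /* analogous to the read side */
+          bus->priv = myboard_get_priv(pdev);  /* assumed helper */
+          snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+
+          return mdiobus_register(bus);  /* undone by mdiobus_unregister() */
+  }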
+
+(RG)MII/electrical interface considerations
+===========================================
+
+The Reduced Gigabit Medium Independent Interface (RGMII) is a 12-pin
+electrical signal interface using a synchronous 125MHz clock signal and several
+data lines. Due to this design decision, a 1.5ns to 2ns delay must be added
+between the clock line (RXC or TXC) and the data lines to let the PHY (clock
+sink) have enough setup and hold times to sample the data lines correctly. The
+PHY library offers different types of PHY_INTERFACE_MODE_RGMII* values to let
+the PHY driver, and optionally the MAC driver, implement the required delay. The
+values of phy_interface_t must be understood from the perspective of the PHY
+device itself, leading to the following:
+
+* PHY_INTERFACE_MODE_RGMII: the PHY is not responsible for inserting any
+  internal delay by itself; it assumes that either the Ethernet MAC (if
+  capable) or the PCB traces insert the correct 1.5-2ns delay
+
+* PHY_INTERFACE_MODE_RGMII_TXID: the PHY should insert an internal delay
+ for the transmit data lines (TXD[3:0]) processed by the PHY device
+
+* PHY_INTERFACE_MODE_RGMII_RXID: the PHY should insert an internal delay
+ for the receive data lines (RXD[3:0]) processed by the PHY device
+
+* PHY_INTERFACE_MODE_RGMII_ID: the PHY should insert internal delays for
+ both transmit AND receive data lines from/to the PHY device
+
+Whenever possible, use the PHY side RGMII delay for these reasons:
+
+* PHY devices may offer sub-nanosecond granularity in how they allow a
+ receiver/transmitter side delay (e.g: 0.5, 1.0, 1.5ns) to be specified. Such
+ precision may be required to account for differences in PCB trace lengths
+
+* PHY devices are typically qualified for a large range of applications
+ (industrial, medical, automotive...), and they provide a constant and
+ reliable delay across temperature/pressure/voltage ranges
+
+* PHY device drivers in PHYLIB are reusable by nature; being able to
+  correctly configure a specified delay enables more designs with similar
+  delay requirements to operate correctly
+
+For cases where the PHY is not capable of providing this delay, but the
+Ethernet MAC driver is capable of doing so, the correct phy_interface_t value
+should be PHY_INTERFACE_MODE_RGMII, and the Ethernet MAC driver should be
+configured correctly in order to provide the required transmit and/or receive
+side delay from the perspective of the PHY device. Conversely, if the Ethernet
+MAC driver looks at the phy_interface_t value, for any other mode but
+PHY_INTERFACE_MODE_RGMII, it should make sure that the MAC-level delays are
+disabled.
+
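+A MAC driver following this convention could be sketched as below; the
+my_mac_set_*_delay() helpers are assumptions standing in for whatever
+register writes the MAC actually requires::
+
+  /* insert MAC-level delays only when the PHY does not provide them */
+  switch (phydev->interface) {
+  case PHY_INTERFACE_MODE_RGMII:
+          my_mac_set_tx_delay(priv, true);
+          my_mac_set_rx_delay(priv, true);
+          break;
+  default:
+          /* RGMII_ID/_TXID/_RXID and non-RGMII modes: no MAC delays */
+          my_mac_set_tx_delay(priv, false);
+          my_mac_set_rx_delay(priv, false);
+          break;
+  }
+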
+In case neither the Ethernet MAC, nor the PHY are capable of providing the
+required delays, as defined per the RGMII standard, several options may be
+available:
+
+* Some SoCs may offer a pin pad/mux/controller capable of configuring a given
+  set of pins' strength, delays, and voltage; it may be a suitable
+  option to insert the expected 2ns RGMII delay.
+
+* Modifying the PCB design to include a fixed delay (e.g: using a specifically
+ designed serpentine), which may not require software configuration at all.
+
+Common problems with RGMII delay mismatch
+-----------------------------------------
+
+When there is an RGMII delay mismatch between the Ethernet MAC and the PHY,
+the clock and data line signals will most likely be unstable when the PHY or
+MAC takes a snapshot of these signals to translate them into logical 1 or 0
+states and reconstruct the data being transmitted/received. Typical
+symptoms include:
+
+* Transmission/reception partially works, and there is frequent or occasional
+ packet loss observed
+
+* Ethernet MAC may report some or all packets ingressing with a FCS/CRC error,
+ or just discard them all
+
+* Switching to lower speeds such as 10/100Mbits/sec makes the problem go away
+ (since there is enough setup/hold time in that case)
+
+Connecting to a PHY
+===================
+
+Sometime during startup, the network driver needs to establish a connection
+between the PHY device, and the network device. At this time, the PHY's bus
+and drivers need to all have been loaded, so it is ready for the connection.
+At this point, there are several ways to connect to the PHY:
+
+#. The PAL handles everything, and only calls the network driver when
+ the link state changes, so it can react.
+
+#. The PAL handles everything except interrupts (usually because the
+ controller has the interrupt registers).
+
+#. The PAL handles everything, but checks in with the driver every second,
+ allowing the network driver to react first to any changes before the PAL
+ does.
+
+#. The PAL serves only as a library of functions, with the network device
+ manually calling functions to update status, and configure the PHY
+
+
+Letting the PHY Abstraction Layer do Everything
+===============================================
+
+If you choose option 1 (the hope is that every driver can, but the PAL is
+still useful to drivers that can't), connecting to the PHY is simple:
+
+First, you need a function to react to changes in the link state. This
+function follows this protocol::
+
+ static void adjust_link(struct net_device *dev);
+
+Next, you need to know the device name of the PHY connected to this device.
+The name will look something like "0:00", where the first number is the
+bus id, and the second is the PHY's address on that bus. Typically,
+the bus is responsible for making its ID unique.
+
+Now, to connect, just call this function::
+
+ phydev = phy_connect(dev, phy_name, &adjust_link, interface);
+
+*phydev* is a pointer to the phy_device structure which represents the PHY.
+If phy_connect is successful, it will return the pointer. dev, here, is the
+pointer to your net_device. Once done, this function will have started the
+PHY's software state machine, and registered for the PHY's interrupt, if it
+has one. The phydev structure will be populated with information about the
+current state, though the PHY will not yet be truly operational at this
+point.
+
+PHY-specific flags should be set in phydev->dev_flags prior to the call
+to phy_connect() such that the underlying PHY driver can check for flags
+and perform specific operations based on them.
+This is useful if the system has put hardware restrictions on
+the PHY/controller, of which the PHY needs to be aware.
+
+*interface* is a u32 which specifies the connection type used
+between the controller and the PHY. Examples are GMII, MII,
+RGMII, and SGMII. For a full list, see include/linux/phy.h
+
+Now just make sure that phydev->supported and phydev->advertising have any
+values pruned from them which don't make sense for your controller (a 10/100
+controller may be connected to a gigabit capable PHY, so you would need to
+mask off SUPPORTED_1000baseT*). See include/linux/ethtool.h for definitions
+for these bitfields. Note that you should not SET any bits, except the
+SUPPORTED_Pause and SUPPORTED_AsymPause bits (see below), or the PHY may get
+put into an unsupported state.
+
+Lastly, once the controller is ready to handle network traffic, you call
+phy_start(phydev). This tells the PAL that you are ready, and configures the
+PHY to connect to the network. If the MAC interrupt of your network driver
+also handles PHY status changes, just set phydev->irq to PHY_IGNORE_INTERRUPT
+before you call phy_start and use phy_mac_interrupt() from the network
+driver. If you don't want to use interrupts, set phydev->irq to PHY_POLL.
+phy_start() enables the PHY interrupts (if applicable) and starts the
+phylib state machine.
+
+When you want to disconnect from the network (even if just briefly), you call
+phy_stop(phydev). This function also stops the phylib state machine and
+disables PHY interrupts.
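+
+Putting this together, a minimal sketch of a driver's open/stop paths
+(my_open/my_stop and the "0:00" bus id are assumptions for illustration)
+might look like::
+
+  static int my_open(struct net_device *dev)
+  {
+          struct phy_device *phydev;
+
+          /* real drivers usually take the id from firmware/platform data */
+          phydev = phy_connect(dev, "0:00", &adjust_link,
+                               PHY_INTERFACE_MODE_RGMII);
+          if (IS_ERR(phydev))
+                  return PTR_ERR(phydev);
+
+          phy_start(phydev);  /* the PAL state machine now runs */
+          return 0;
+  }
+
+  static int my_stop(struct net_device *dev)
+  {
+          phy_stop(dev->phydev);
+          phy_disconnect(dev->phydev);
+          return 0;
+  }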
+
+Pause frames / flow control
+===========================
+
+The PHY does not participate directly in flow control/pause frames except by
+making sure that the SUPPORTED_Pause and SUPPORTED_AsymPause bits are set in
+MII_ADVERTISE to indicate towards the link partner that the Ethernet MAC
+controller supports such a thing. Since flow control/pause frames generation
+involves the Ethernet MAC driver, it is recommended that this driver takes care
+of properly indicating advertisement and support for such features by setting
+the SUPPORTED_Pause and SUPPORTED_AsymPause bits accordingly. This can be done
+either before or after phy_connect() and/or as a result of implementing the
+ethtool::set_pauseparam feature.
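+
+For instance, a MAC driver whose hardware can generate and honour both
+symmetric and asymmetric pause frames might simply call the following
+helper (assuming phy_support_asym_pause() from include/linux/phy.h)::
+
+  /* after phy_connect(): advertise Pause and AsymPause to the partner */
+  phy_support_asym_pause(phydev);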
+
+
+Keeping Close Tabs on the PAL
+=============================
+
+It is possible that the PAL's built-in state machine needs a little help to
+keep your network device and the PHY properly in sync. If so, you can
+register a helper function when connecting to the PHY, which will be called
+every second before the state machine reacts to any changes. To do this, you
+need to manually call phy_attach() and phy_prepare_link(), and then call
+phy_start_machine() with the second argument set to point to your special
+handler.
+
+Currently there are no examples of how to use this functionality, and testing
+on it has been limited because the author does not have any drivers which use
+it (they all use option 1). So Caveat Emptor.
+
+Doing it all yourself
+=====================
+
+There's a remote chance that the PAL's built-in state machine cannot track
+the complex interactions between the PHY and your network device. If this is
+so, you can simply call phy_attach(), and not call phy_start_machine() or
+phy_prepare_link(). This will mean that phydev->state is entirely yours to
+handle (phy_start and phy_stop toggle between some of the states, so you
+might need to avoid them).
+
+An effort has been made to make sure that useful functionality can be
+accessed without the state-machine running, and most of these functions are
+descended from functions which did not interact with a complex state-machine.
+However, again, no effort has been made so far to test running without the
+state machine, so tryer beware.
+
+Here is a brief rundown of the functions::
+
+ int phy_read(struct phy_device *phydev, u16 regnum);
+ int phy_write(struct phy_device *phydev, u16 regnum, u16 val);
+
+Simple read/write primitives. They invoke the bus's read/write function
+pointers.
+::
+
+ void phy_print_status(struct phy_device *phydev);
+
+A convenience function to print out the PHY status neatly.
+::
+
+ void phy_request_interrupt(struct phy_device *phydev);
+
+Requests the IRQ for the PHY interrupts.
+::
+
+ struct phy_device * phy_attach(struct net_device *dev, const char *phy_id,
+ phy_interface_t interface);
+
+Attaches a network device to a particular PHY, binding the PHY to a generic
+driver if none was found during bus initialization.
+::
+
+ int phy_start_aneg(struct phy_device *phydev);
+
+Using variables inside the phydev structure, either configures advertising
+and resets autonegotiation, or disables autonegotiation, and configures
+forced settings.
+::
+
+ static inline int phy_read_status(struct phy_device *phydev);
+
+Fills the phydev structure with up-to-date information about the current
+settings in the PHY.
+::
+
+ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+
+Ethtool convenience functions.
+::
+
+ int phy_mii_ioctl(struct phy_device *phydev,
+ struct mii_ioctl_data *mii_data, int cmd);
+
+The MII ioctl. Note that this function will completely screw up the state
+machine if you write registers like BMCR, BMSR, ADVERTISE, etc. Best to
+use this only to write registers which are not standard, and don't set off
+a renegotiation.
+
+PHY Device Drivers
+==================
+
+With the PHY Abstraction Layer, adding support for new PHYs is
+quite easy. In some cases, no work is required at all! However,
+many PHYs require a little hand-holding to get up-and-running.
+
+Generic PHY driver
+------------------
+
+If the desired PHY doesn't have any errata, quirks, or special
+features you want to support, then it may be best to not add
+support, and let the PHY Abstraction Layer's Generic PHY Driver
+do all of the work.
+
+Writing a PHY driver
+--------------------
+
+If you do need to write a PHY driver, the first thing to do is
+make sure it can be matched with an appropriate PHY device.
+This is done during bus initialization by reading the device's
+UID (stored in registers 2 and 3), then comparing it to each
+driver's phy_id field by ANDing it with each driver's
+phy_id_mask field. Also, it needs a name. Here's an example::
+
+ static struct phy_driver dm9161_driver = {
+ .phy_id = 0x0181b880,
+ .name = "Davicom DM9161E",
+ .phy_id_mask = 0x0ffffff0,
+ ...
+ }
+
+Next, you need to specify what features (speed, duplex, autoneg,
+etc) your PHY device and driver support. Most PHYs support
+PHY_BASIC_FEATURES, but you can look in include/mii.h for other
+features.
+
+Each driver consists of a number of function pointers, documented
+in include/linux/phy.h under the phy_driver structure.
+
+Of these, only config_aneg and read_status are required to be
+assigned by the driver code. The rest are optional. Also, it is
+preferred to use the generic phy driver's versions of these two
+functions if at all possible: genphy_read_status and
+genphy_config_aneg. If this is not possible, it is likely that
+you only need to perform some actions before and after invoking
+these functions, and so your functions will wrap the generic
+ones.
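+
+A skeleton wrapping the generic implementations might look like this (the
+IDs and names below are placeholders, not a real device)::
+
+  static int my_phy_read_status(struct phy_device *phydev)
+  {
+          int err = genphy_read_status(phydev);
+
+          if (err < 0)
+                  return err;
+          /* hypothetical vendor-specific work after the generic read */
+          return 0;
+  }
+
+  static struct phy_driver my_phy_driver[] = { {
+          .phy_id         = 0x01810000,   /* placeholder OUI/model */
+          .phy_id_mask    = 0x0ffffff0,
+          .name           = "Hypothetical PHY",
+          .features       = PHY_BASIC_FEATURES,
+          .config_aneg    = genphy_config_aneg,
+          .read_status    = my_phy_read_status,
+  } };
+
+  module_phy_driver(my_phy_driver);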
+
+Feel free to look at the Marvell, Cicada, and Davicom drivers in
+drivers/net/phy/ for examples (the lxt and qsemi drivers have
+not been tested as of this writing).
+
+The PHY's MMD register accesses are handled by the PAL framework
+by default, but can be overridden by a specific PHY driver if
+required. This could be the case if a PHY was released for
+manufacturing before the MMD PHY register definitions were
+standardized by the IEEE. Most modern PHYs will be able to use
+the generic PAL framework for accessing the PHY's MMD registers.
+An example of such usage is for Energy Efficient Ethernet support,
+implemented in the PAL. This support uses the PAL to access MMD
+registers for EEE query and configuration if the PHY supports
+the IEEE standard access mechanisms, or can use the PHY's specific
+access interfaces if overridden by the specific PHY driver. See
+the Micrel driver in drivers/net/phy/ for an example of how this
+can be implemented.
+
+Board Fixups
+============
+
+Sometimes the specific interaction between the platform and the PHY requires
+special handling. For instance, to change where the PHY's clock input is,
+or to add a delay to account for latency issues in the data path. In order
+to support such contingencies, the PHY Layer allows platform code to register
+fixups to be run when the PHY is brought up (or subsequently reset).
+
+When the PHY Layer brings up a PHY it checks to see if there are any fixups
+registered for it, matching based on UID (contained in the PHY device's phy_id
+field) and the bus identifier (contained in phydev->dev.bus_id). Both must
+match, however two constants, PHY_ANY_ID and PHY_ANY_UID, are provided as
+wildcards for the bus ID and UID, respectively.
+
+When a match is found, the PHY layer will invoke the run function associated
+with the fixup. This function is passed a pointer to the phy_device of
+interest. It should therefore only operate on that PHY.
+
+The platform code can either register the fixup using phy_register_fixup()::
+
+ int phy_register_fixup(const char *phy_id,
+ u32 phy_uid, u32 phy_uid_mask,
+ int (*run)(struct phy_device *));
+
+Or using one of the two stubs, phy_register_fixup_for_uid() and
+phy_register_fixup_for_id()::
+
+ int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
+ int (*run)(struct phy_device *));
+ int phy_register_fixup_for_id(const char *phy_id,
+ int (*run)(struct phy_device *));
+
+The stubs set one of the two matching criteria, and set the other one to
+match anything.
+
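+As a sketch, board code could register a fixup like this (the UID and the
+register write are placeholders for a vendor-specific setting)::
+
+  static int my_board_phy_fixup(struct phy_device *phydev)
+  {
+          /* e.g. switch the PHY's clock output; 0x1e/0x0001 are assumed */
+          return phy_write(phydev, 0x1e, 0x0001);
+  }
+
+  static int __init my_board_init(void)
+  {
+          return phy_register_fixup_for_uid(0x01810000, 0x0ffffff0,
+                                            my_board_phy_fixup);
+  }
+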
+When phy_register_fixup() or one of the \*_for_uid()/\*_for_id() variants is
+called from a module, the fixup must be unregistered and its memory freed
+when the module is unloaded.
+
+Call one of the following functions before unloading the module::
+
+ int phy_unregister_fixup(const char *phy_id, u32 phy_uid, u32 phy_uid_mask);
+ int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask);
+ int phy_unregister_fixup_for_id(const char *phy_id);
+
+Standards
+=========
+
+IEEE Standard 802.3: CSMA/CD Access Method and Physical Layer Specifications, Section Two:
+http://standards.ieee.org/getieee802/download/802.3-2008_section2.pdf
+
+RGMII v1.3:
+http://web.archive.org/web/20160303212629/http://www.hp.com/rnd/pdfs/RGMIIv1_3.pdf
+
+RGMII v2.0:
+http://web.archive.org/web/20160303171328/http://www.hp.com/rnd/pdfs/RGMIIv2_0_final_hp.pdf
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
deleted file mode 100644
index bdec0f700bc1..000000000000
--- a/Documentation/networking/phy.txt
+++ /dev/null
@@ -1,427 +0,0 @@
-
--------
-PHY Abstraction Layer
-(Updated 2008-04-08)
-
-Purpose
-
- Most network devices consist of set of registers which provide an interface
- to a MAC layer, which communicates with the physical connection through a
- PHY. The PHY concerns itself with negotiating link parameters with the link
- partner on the other side of the network connection (typically, an ethernet
- cable), and provides a register interface to allow drivers to determine what
- settings were chosen, and to configure what settings are allowed.
-
- While these devices are distinct from the network devices, and conform to a
- standard layout for the registers, it has been common practice to integrate
- the PHY management code with the network driver. This has resulted in large
- amounts of redundant code. Also, on embedded systems with multiple (and
- sometimes quite different) ethernet controllers connected to the same
- management bus, it is difficult to ensure safe use of the bus.
-
- Since the PHYs are devices, and the management busses through which they are
- accessed are, in fact, busses, the PHY Abstraction Layer treats them as such.
- In doing so, it has these goals:
-
- 1) Increase code-reuse
- 2) Increase overall code-maintainability
- 3) Speed development time for new network drivers, and for new systems
-
- Basically, this layer is meant to provide an interface to PHY devices which
- allows network driver writers to write as little code as possible, while
- still providing a full feature set.
-
-The MDIO bus
-
- Most network devices are connected to a PHY by means of a management bus.
- Different devices use different busses (though some share common interfaces).
- In order to take advantage of the PAL, each bus interface needs to be
- registered as a distinct device.
-
- 1) read and write functions must be implemented. Their prototypes are:
-
- int write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
- int read(struct mii_bus *bus, int mii_id, int regnum);
-
- mii_id is the address on the bus for the PHY, and regnum is the register
- number. These functions are guaranteed not to be called from interrupt
- time, so it is safe for them to block, waiting for an interrupt to signal
- the operation is complete
-
- 2) A reset function is optional. This is used to return the bus to an
- initialized state.
-
- 3) A probe function is needed. This function should set up anything the bus
- driver needs, setup the mii_bus structure, and register with the PAL using
- mdiobus_register. Similarly, there's a remove function to undo all of
- that (use mdiobus_unregister).
-
- 4) Like any driver, the device_driver structure must be configured, and init
- exit functions are used to register the driver.
-
- 5) The bus must also be declared somewhere as a device, and registered.
-
- As an example for how one driver implemented an mdio bus driver, see
- drivers/net/ethernet/freescale/fsl_pq_mdio.c and an associated DTS file
- for one of the users. (e.g. "git grep fsl,.*-mdio arch/powerpc/boot/dts/")
-
-(RG)MII/electrical interface considerations
-
- The Reduced Gigabit Medium Independent Interface (RGMII) is a 12-pin
- electrical signal interface using a synchronous 125Mhz clock signal and several
- data lines. Due to this design decision, a 1.5ns to 2ns delay must be added
- between the clock line (RXC or TXC) and the data lines to let the PHY (clock
- sink) have enough setup and hold times to sample the data lines correctly. The
- PHY library offers different types of PHY_INTERFACE_MODE_RGMII* values to let
- the PHY driver and optionally the MAC driver, implement the required delay. The
- values of phy_interface_t must be understood from the perspective of the PHY
- device itself, leading to the following:
-
- * PHY_INTERFACE_MODE_RGMII: the PHY is not responsible for inserting any
- internal delay by itself, it assumes that either the Ethernet MAC (if capable
- or the PCB traces) insert the correct 1.5-2ns delay
-
- * PHY_INTERFACE_MODE_RGMII_TXID: the PHY should insert an internal delay
- for the transmit data lines (TXD[3:0]) processed by the PHY device
-
- * PHY_INTERFACE_MODE_RGMII_RXID: the PHY should insert an internal delay
- for the receive data lines (RXD[3:0]) processed by the PHY device
-
- * PHY_INTERFACE_MODE_RGMII_ID: the PHY should insert internal delays for
- both transmit AND receive data lines from/to the PHY device
-
- Whenever possible, use the PHY side RGMII delay for these reasons:
-
- * PHY devices may offer sub-nanosecond granularity in how they allow a
- receiver/transmitter side delay (e.g: 0.5, 1.0, 1.5ns) to be specified. Such
- precision may be required to account for differences in PCB trace lengths
-
- * PHY devices are typically qualified for a large range of applications
- (industrial, medical, automotive...), and they provide a constant and
- reliable delay across temperature/pressure/voltage ranges
-
- * PHY device drivers in PHYLIB being reusable by nature, being able to
- configure correctly a specified delay enables more designs with similar delay
- requirements to be operate correctly
-
- For cases where the PHY is not capable of providing this delay, but the
- Ethernet MAC driver is capable of doing so, the correct phy_interface_t value
- should be PHY_INTERFACE_MODE_RGMII, and the Ethernet MAC driver should be
- configured correctly in order to provide the required transmit and/or receive
- side delay from the perspective of the PHY device. Conversely, if the Ethernet
- MAC driver looks at the phy_interface_t value, for any other mode but
- PHY_INTERFACE_MODE_RGMII, it should make sure that the MAC-level delays are
- disabled.
-
- In case neither the Ethernet MAC, nor the PHY are capable of providing the
- required delays, as defined per the RGMII standard, several options may be
- available:
-
- * Some SoCs may offer a pin pad/mux/controller capable of configuring a given
- set of pins'strength, delays, and voltage; and it may be a suitable
- option to insert the expected 2ns RGMII delay.
-
- * Modifying the PCB design to include a fixed delay (e.g: using a specifically
- designed serpentine), which may not require software configuration at all.
-
-Common problems with RGMII delay mismatch
-
- When there is a RGMII delay mismatch between the Ethernet MAC and the PHY, this
- will most likely result in the clock and data line signals to be unstable when
- the PHY or MAC take a snapshot of these signals to translate them into logical
- 1 or 0 states and reconstruct the data being transmitted/received. Typical
- symptoms include:
-
- * Transmission/reception partially works, and there is frequent or occasional
- packet loss observed
-
- * Ethernet MAC may report some or all packets ingressing with a FCS/CRC error,
- or just discard them all
-
- * Switching to lower speeds such as 10/100Mbits/sec makes the problem go away
- (since there is enough setup/hold time in that case)
-
-
-Connecting to a PHY
-
- Sometime during startup, the network driver needs to establish a connection
- between the PHY device, and the network device. At this time, the PHY's bus
- and drivers need to all have been loaded, so it is ready for the connection.
- At this point, there are several ways to connect to the PHY:
-
- 1) The PAL handles everything, and only calls the network driver when
- the link state changes, so it can react.
-
- 2) The PAL handles everything except interrupts (usually because the
- controller has the interrupt registers).
-
- 3) The PAL handles everything, but checks in with the driver every second,
- allowing the network driver to react first to any changes before the PAL
- does.
-
- 4) The PAL serves only as a library of functions, with the network device
- manually calling functions to update status, and configure the PHY
-
-
-Letting the PHY Abstraction Layer do Everything
-
- If you choose option 1 (The hope is that every driver can, but to still be
- useful to drivers that can't), connecting to the PHY is simple:
-
- First, you need a function to react to changes in the link state. This
- function follows this protocol:
-
- static void adjust_link(struct net_device *dev);
-
- Next, you need to know the device name of the PHY connected to this device.
- The name will look something like, "0:00", where the first number is the
- bus id, and the second is the PHY's address on that bus. Typically,
- the bus is responsible for making its ID unique.
-
- Now, to connect, just call this function:
-
- phydev = phy_connect(dev, phy_name, &adjust_link, interface);
-
- phydev is a pointer to the phy_device structure which represents the PHY. If
- phy_connect is successful, it will return the pointer. dev, here, is the
- pointer to your net_device. Once done, this function will have started the
- PHY's software state machine, and registered for the PHY's interrupt, if it
- has one. The phydev structure will be populated with information about the
- current state, though the PHY will not yet be truly operational at this
- point.
-
- PHY-specific flags should be set in phydev->dev_flags prior to the call
- to phy_connect() such that the underlying PHY driver can check for flags
- and perform specific operations based on them.
- This is useful if the system has put hardware restrictions on
- the PHY/controller, of which the PHY needs to be aware.
-
- interface is a u32 which specifies the connection type used
- between the controller and the PHY. Examples are GMII, MII,
- RGMII, and SGMII. For a full list, see include/linux/phy.h
-
- Now just make sure that phydev->supported and phydev->advertising have any
- values pruned from them which don't make sense for your controller (a 10/100
- controller may be connected to a gigabit capable PHY, so you would need to
- mask off SUPPORTED_1000baseT*). See include/linux/ethtool.h for definitions
- for these bitfields. Note that you should not SET any bits, except the
- SUPPORTED_Pause and SUPPORTED_AsymPause bits (see below), or the PHY may get
- put into an unsupported state.
-
- Lastly, once the controller is ready to handle network traffic, you call
- phy_start(phydev). This tells the PAL that you are ready, and configures the
- PHY to connect to the network. If you want to handle your own interrupts,
- just set phydev->irq to PHY_IGNORE_INTERRUPT before you call phy_start.
- Similarly, if you don't want to use interrupts, set phydev->irq to PHY_POLL.
-
- When you want to disconnect from the network (even if just briefly), you call
- phy_stop(phydev).
-
-Pause frames / flow control
-
- The PHY does not participate directly in flow control/pause frames except by
- making sure that the SUPPORTED_Pause and SUPPORTED_AsymPause bits are set in
- MII_ADVERTISE to indicate towards the link partner that the Ethernet MAC
- controller supports such a thing. Since flow control/pause frames generation
- involves the Ethernet MAC driver, it is recommended that this driver takes care
- of properly indicating advertisement and support for such features by setting
- the SUPPORTED_Pause and SUPPORTED_AsymPause bits accordingly. This can be done
- either before or after phy_connect() and/or as a result of implementing the
- ethtool::set_pauseparam feature.
-
-
-Keeping Close Tabs on the PAL
-
- It is possible that the PAL's built-in state machine needs a little help to
- keep your network device and the PHY properly in sync. If so, you can
- register a helper function when connecting to the PHY, which will be called
- every second before the state machine reacts to any changes. To do this, you
- need to manually call phy_attach() and phy_prepare_link(), and then call
- phy_start_machine() with the second argument set to point to your special
- handler.
-
- Currently there are no examples of how to use this functionality, and testing
- on it has been limited because the author does not have any drivers which use
- it (they all use option 1). So Caveat Emptor.
-
-Doing it all yourself
-
- There's a remote chance that the PAL's built-in state machine cannot track
- the complex interactions between the PHY and your network device. If this is
- so, you can simply call phy_attach(), and not call phy_start_machine or
- phy_prepare_link(). This will mean that phydev->state is entirely yours to
- handle (phy_start and phy_stop toggle between some of the states, so you
- might need to avoid them).
-
- An effort has been made to make sure that useful functionality can be
- accessed without the state-machine running, and most of these functions are
- descended from functions which did not interact with a complex state-machine.
- However, again, no effort has been made so far to test running without the
- state machine, so tryer beware.
-
- Here is a brief rundown of the functions:
-
- int phy_read(struct phy_device *phydev, u16 regnum);
- int phy_write(struct phy_device *phydev, u16 regnum, u16 val);
-
- Simple read/write primitives. They invoke the bus's read/write function
- pointers.
-
- void phy_print_status(struct phy_device *phydev);
-
- A convenience function to print out the PHY status neatly.
-
- int phy_start_interrupts(struct phy_device *phydev);
- int phy_stop_interrupts(struct phy_device *phydev);
-
- Requests the IRQ for the PHY interrupts, then enables them for
- start, or disables then frees them for stop.
-
- struct phy_device * phy_attach(struct net_device *dev, const char *phy_id,
- phy_interface_t interface);
-
- Attaches a network device to a particular PHY, binding the PHY to a generic
- driver if none was found during bus initialization.
-
- int phy_start_aneg(struct phy_device *phydev);
-
- Using variables inside the phydev structure, either configures advertising
- and resets autonegotiation, or disables autonegotiation, and configures
- forced settings.
-
- static inline int phy_read_status(struct phy_device *phydev);
-
- Fills the phydev structure with up-to-date information about the current
- settings in the PHY.
-
- int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
-
- Ethtool convenience functions.
-
- int phy_mii_ioctl(struct phy_device *phydev,
- struct mii_ioctl_data *mii_data, int cmd);
-
- The MII ioctl. Note that this function will completely screw up the state
- machine if you write registers like BMCR, BMSR, ADVERTISE, etc. Best to
- use this only to write registers which are not standard, and don't set off
- a renegotiation.
-
-
-PHY Device Drivers
-
- With the PHY Abstraction Layer, adding support for new PHYs is
- quite easy. In some cases, no work is required at all! However,
- many PHYs require a little hand-holding to get up-and-running.
-
-Generic PHY driver
-
- If the desired PHY doesn't have any errata, quirks, or special
- features you want to support, then it may be best to not add
- support, and let the PHY Abstraction Layer's Generic PHY Driver
- do all of the work.
-
-Writing a PHY driver
-
- If you do need to write a PHY driver, the first thing to do is
- make sure it can be matched with an appropriate PHY device.
- This is done during bus initialization by reading the device's
- UID (stored in registers 2 and 3), then comparing it to each
- driver's phy_id field by ANDing it with each driver's
- phy_id_mask field. Also, it needs a name. Here's an example:
-
- static struct phy_driver dm9161_driver = {
- .phy_id = 0x0181b880,
- .name = "Davicom DM9161E",
- .phy_id_mask = 0x0ffffff0,
- ...
- }
-
- Next, you need to specify what features (speed, duplex, autoneg,
- etc) your PHY device and driver support. Most PHYs support
- PHY_BASIC_FEATURES, but you can look in include/mii.h for other
- features.
-
- Each driver consists of a number of function pointers, documented
- in include/linux/phy.h under the phy_driver structure.
-
- Of these, only config_aneg and read_status are required to be
- assigned by the driver code. The rest are optional. Also, it is
- preferred to use the generic phy driver's versions of these two
- functions if at all possible: genphy_read_status and
- genphy_config_aneg. If this is not possible, it is likely that
- you only need to perform some actions before and after invoking
- these functions, and so your functions will wrap the generic
- ones.
-
- Feel free to look at the Marvell, Cicada, and Davicom drivers in
- drivers/net/phy/ for examples (the lxt and qsemi drivers have
- not been tested as of this writing).
-
- The PHY's MMD register accesses are handled by the PAL framework
- by default, but can be overridden by a specific PHY driver if
- required. This could be the case if a PHY was released for
- manufacturing before the MMD PHY register definitions were
- standardized by the IEEE. Most modern PHYs will be able to use
- the generic PAL framework for accessing the PHY's MMD registers.
- An example of such usage is for Energy Efficient Ethernet support,
- implemented in the PAL. This support uses the PAL to access MMD
- registers for EEE query and configuration if the PHY supports
- the IEEE standard access mechanisms, or can use the PHY's specific
- access interfaces if overridden by the specific PHY driver. See
- the Micrel driver in drivers/net/phy/ for an example of how this
- can be implemented.
-
-Board Fixups
-
- Sometimes the specific interaction between the platform and the PHY requires
- special handling. For instance, to change where the PHY's clock input is,
- or to add a delay to account for latency issues in the data path. In order
- to support such contingencies, the PHY Layer allows platform code to register
- fixups to be run when the PHY is brought up (or subsequently reset).
-
- When the PHY Layer brings up a PHY it checks to see if there are any fixups
- registered for it, matching based on UID (contained in the PHY device's phy_id
- field) and the bus identifier (contained in phydev->dev.bus_id). Both must
- match, however two constants, PHY_ANY_ID and PHY_ANY_UID, are provided as
- wildcards for the bus ID and UID, respectively.
-
- When a match is found, the PHY layer will invoke the run function associated
- with the fixup. This function is passed a pointer to the phy_device of
- interest. It should therefore only operate on that PHY.
-
- The platform code can either register the fixup using phy_register_fixup():
-
- int phy_register_fixup(const char *phy_id,
- u32 phy_uid, u32 phy_uid_mask,
- int (*run)(struct phy_device *));
-
- Or using one of the two stubs, phy_register_fixup_for_uid() and
- phy_register_fixup_for_id():
-
- int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
- int (*run)(struct phy_device *));
- int phy_register_fixup_for_id(const char *phy_id,
- int (*run)(struct phy_device *));
-
- The stubs set one of the two matching criteria, and set the other one to
- match anything.
-
- When phy_register_fixup() or *_for_uid()/*_for_id() is called at module,
- unregister fixup and free allocate memory are required.
-
- Call one of following function before unloading module.
-
- int phy_unregister_fixup(const char *phy_id, u32 phy_uid, u32 phy_uid_mask);
- int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask);
- int phy_register_fixup_for_id(const char *phy_id);
-
-Standards
-
- IEEE Standard 802.3: CSMA/CD Access Method and Physical Layer Specifications, Section Two:
- http://standards.ieee.org/getieee802/download/802.3-2008_section2.pdf
-
- RGMII v1.3:
- http://web.archive.org/web/20160303212629/http://www.hp.com/rnd/pdfs/RGMIIv1_3.pdf
-
- RGMII v2.0:
- http://web.archive.org/web/20160303171328/http://www.hp.com/rnd/pdfs/RGMIIv2_0_final_hp.pdf
diff --git a/Documentation/networking/snmp_counter.rst b/Documentation/networking/snmp_counter.rst
index fe8f741193be..c5642f430d2e 100644
--- a/Documentation/networking/snmp_counter.rst
+++ b/Documentation/networking/snmp_counter.rst
@@ -1,16 +1,17 @@
-===========
+============
SNMP counter
-===========
+============
This document explains the meaning of SNMP counters.
General IPv4 counters
-====================
+=====================
All layer 4 packets and ICMP packets will change these counters, but
these counters won't be changed by layer 2 packets (such as STP) or
ARP packets.
* IpInReceives
+
Defined in `RFC1213 ipInReceives`_
.. _RFC1213 ipInReceives: https://tools.ietf.org/html/rfc1213#page-26
@@ -23,6 +24,7 @@ and so on). It indicates the number of aggregated segments after
GRO/LRO.
* IpInDelivers
+
Defined in `RFC1213 ipInDelivers`_
.. _RFC1213 ipInDelivers: https://tools.ietf.org/html/rfc1213#page-28
@@ -33,6 +35,7 @@ supported protocols will be delivered, if someone listens on the raw
socket, all valid IP packets will be delivered.
* IpOutRequests
+
Defined in `RFC1213 ipOutRequests`_
.. _RFC1213 ipOutRequests: https://tools.ietf.org/html/rfc1213#page-28
@@ -42,6 +45,7 @@ multicast packets, and would always be updated together with
IpExtOutOctets.
* IpExtInOctets and IpExtOutOctets
+
They are Linux kernel extensions, no RFC definitions. Please note,
RFC1213 indeed defines ifInOctets and ifOutOctets, but they
are different things. The ifInOctets and ifOutOctets include the MAC
@@ -49,6 +53,7 @@ layer header size but IpExtInOctets and IpExtOutOctets don't, they
only include the IP layer header and the IP layer data.
* IpExtInNoECTPkts, IpExtInECT1Pkts, IpExtInECT0Pkts, IpExtInCEPkts
+
They indicate the number of four kinds of ECN IP packets, please refer
`Explicit Congestion Notification`_ for more details.
@@ -60,6 +65,7 @@ for the same packet, you might find that IpInReceives count 1, but
IpExtInNoECTPkts counts 2 or more.
* IpInHdrErrors
+
Defined in `RFC1213 ipInHdrErrors`_. It indicates the packet is
dropped due to the IP header error. It might happen in both IP input
and IP forward paths.
@@ -67,6 +73,7 @@ and IP forward paths.
.. _RFC1213 ipInHdrErrors: https://tools.ietf.org/html/rfc1213#page-27
* IpInAddrErrors
+
Defined in `RFC1213 ipInAddrErrors`_. It will be increased in two
scenarios: (1) The IP address is invalid. (2) The destination IP
address is not a local address and IP forwarding is not enabled
@@ -74,6 +81,7 @@ address is not a local address and IP forwarding is not enabled
.. _RFC1213 ipInAddrErrors: https://tools.ietf.org/html/rfc1213#page-27
* IpExtInNoRoutes
+
This counter means the packet is dropped when the IP stack receives a
packet and can't find a route for it from the route table. It might
happen when IP forwarding is enabled and the destination IP address is
@@ -81,6 +89,7 @@ not a local address and there is no route for the destination IP
address.
* IpInUnknownProtos
+
Defined in `RFC1213 ipInUnknownProtos`_. It will be increased if the
layer 4 protocol is unsupported by kernel. If an application is using
raw socket, kernel will always deliver the packet to the raw socket
@@ -89,10 +98,12 @@ and this counter won't be increased.
.. _RFC1213 ipInUnknownProtos: https://tools.ietf.org/html/rfc1213#page-27
* IpExtInTruncatedPkts
+
For IPv4 packet, it means the actual data size is smaller than the
"Total Length" field in the IPv4 header.
* IpInDiscards
+
Defined in `RFC1213 ipInDiscards`_. It indicates the packet is dropped
in the IP receiving path and due to kernel internal reasons (e.g. no
enough memory).
@@ -100,20 +111,23 @@ enough memory).
.. _RFC1213 ipInDiscards: https://tools.ietf.org/html/rfc1213#page-28
* IpOutDiscards
+
Defined in `RFC1213 ipOutDiscards`_. It indicates the packet is
dropped in the IP sending path and due to kernel internal reasons.
.. _RFC1213 ipOutDiscards: https://tools.ietf.org/html/rfc1213#page-28
* IpOutNoRoutes
+
Defined in `RFC1213 ipOutNoRoutes`_. It indicates the packet is
dropped in the IP sending path and no route is found for it.
.. _RFC1213 ipOutNoRoutes: https://tools.ietf.org/html/rfc1213#page-29
ICMP counters
-============
+=============
* IcmpInMsgs and IcmpOutMsgs
+
Defined by `RFC1213 icmpInMsgs`_ and `RFC1213 icmpOutMsgs`_
.. _RFC1213 icmpInMsgs: https://tools.ietf.org/html/rfc1213#page-41
@@ -126,6 +140,7 @@ IcmpOutMsgs would still be updated if the IP header is constructed by
a userspace program.
* ICMP named types
+
| These counters include most of common ICMP types, they are:
| IcmpInDestUnreachs: `RFC1213 icmpInDestUnreachs`_
| IcmpInTimeExcds: `RFC1213 icmpInTimeExcds`_
@@ -180,6 +195,7 @@ straightforward. The 'In' counter means kernel receives such a packet
and the 'Out' counter means kernel sends such a packet.
* ICMP numeric types
+
They are IcmpMsgInType[N] and IcmpMsgOutType[N], the [N] indicates the
ICMP type number. These counters track all kinds of ICMP packets. The
ICMP type number definition could be found in the `ICMP parameters`_
@@ -192,12 +208,14 @@ IcmpMsgOutType8 would increase 1. And if kernel gets an ICMP Echo Reply
packet, IcmpMsgInType0 would increase 1.
* IcmpInCsumErrors
+
This counter indicates the checksum of the ICMP packet is
wrong. Kernel verifies the checksum after updating the IcmpInMsgs and
before updating IcmpMsgInType[N]. If a packet has bad checksum, the
IcmpInMsgs would be updated but none of IcmpMsgInType[N] would be updated.
* IcmpInErrors and IcmpOutErrors
+
Defined by `RFC1213 icmpInErrors`_ and `RFC1213 icmpOutErrors`_
.. _RFC1213 icmpInErrors: https://tools.ietf.org/html/rfc1213#page-41
@@ -209,7 +227,7 @@ and the sending packet path use IcmpOutErrors. When IcmpInCsumErrors
is increased, IcmpInErrors would always be increased too.
relationship of the ICMP counters
--------------------------------
+---------------------------------
The sum of IcmpMsgOutType[N] is always equal to IcmpOutMsgs, as they
are updated at the same time. The sum of IcmpMsgInType[N] plus
IcmpInErrors should be equal or larger than IcmpInMsgs. When kernel
@@ -229,8 +247,9 @@ IcmpInMsgs should be less than the sum of IcmpMsgOutType[N] plus
IcmpInErrors.
General TCP counters
-==================
+====================
* TcpInSegs
+
Defined in `RFC1213 tcpInSegs`_
.. _RFC1213 tcpInSegs: https://tools.ietf.org/html/rfc1213#page-48
@@ -247,6 +266,7 @@ isn't aware of GRO. So if two packets are merged by GRO, the TcpInSegs
counter would only increase 1.
* TcpOutSegs
+
Defined in `RFC1213 tcpOutSegs`_
.. _RFC1213 tcpOutSegs: https://tools.ietf.org/html/rfc1213#page-48
@@ -258,6 +278,7 @@ GSO, so if a packet would be split to 2 by GSO, TcpOutSegs will
increase 2.
* TcpActiveOpens
+
Defined in `RFC1213 tcpActiveOpens`_
.. _RFC1213 tcpActiveOpens: https://tools.ietf.org/html/rfc1213#page-47
@@ -267,6 +288,7 @@ state. Every time TcpActiveOpens increases 1, TcpOutSegs should always
increase 1.
* TcpPassiveOpens
+
Defined in `RFC1213 tcpPassiveOpens`_
.. _RFC1213 tcpPassiveOpens: https://tools.ietf.org/html/rfc1213#page-47
@@ -275,6 +297,7 @@ It means the TCP layer receives a SYN, replies a SYN+ACK, come into
the SYN-RCVD state.
* TcpExtTCPRcvCoalesce
+
When packets are received by the TCP layer and are not be read by the
application, the TCP layer will try to merge them. This counter
indicate how many packets are merged in such situation. If GRO is
@@ -282,12 +305,14 @@ enabled, lots of packets would be merged by GRO, these packets
wouldn't be counted to TcpExtTCPRcvCoalesce.
* TcpExtTCPAutoCorking
+
When sending packets, the TCP layer will try to merge small packets to
a bigger one. This counter increase 1 for every packet merged in such
situation. Please refer to the LWN article for more details:
https://lwn.net/Articles/576263/
* TcpExtTCPOrigDataSent
+
This counter is explained by `kernel commit f19c29e3e391`_, I pasted the
explaination below::
@@ -297,6 +322,7 @@ explaination below::
more useful to track the TCP retransmission rate.
* TCPSynRetrans
+
This counter is explained by `kernel commit f19c29e3e391`_, I pasted the
explaination below::
@@ -304,6 +330,7 @@ explaination below::
retransmissions into SYN, fast-retransmits, timeout retransmits, etc.
* TCPFastOpenActiveFail
+
This counter is explained by `kernel commit f19c29e3e391`_, I pasted the
explaination below::
@@ -313,6 +340,7 @@ explaination below::
.. _kernel commit f19c29e3e391: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=f19c29e3e391a66a273e9afebaf01917245148cd
* TcpExtListenOverflows and TcpExtListenDrops
+
When kernel receives a SYN from a client, and if the TCP accept queue
is full, kernel will drop the SYN and add 1 to TcpExtListenOverflows.
At the same time kernel will also add 1 to TcpExtListenDrops. When a
@@ -336,6 +364,8 @@ time client replies ACK, this socket will get another chance to move
to the accept queue.
+TCP Fast Open
+=============
* TcpEstabResets
Defined in `RFC1213 tcpEstabResets`_.
@@ -389,20 +419,23 @@ will disable the fast path at first, and try to enable it after kernel
receives packets.
* TcpExtTCPPureAcks and TcpExtTCPHPAcks
+
If a packet set ACK flag and has no data, it is a pure ACK packet, if
kernel handles it in the fast path, TcpExtTCPHPAcks will increase 1,
if kernel handles it in the slow path, TcpExtTCPPureAcks will
increase 1.
* TcpExtTCPHPHits
+
If a TCP packet has data (which means it is not a pure ACK packet),
and this packet is handled in the fast path, TcpExtTCPHPHits will
increase 1.
TCP abort
-========
+=========
* TcpExtTCPAbortOnData
+
It means TCP layer has data in flight, but need to close the
connection. So TCP layer sends a RST to the other side, indicate the
connection is not closed very graceful. An easy way to increase this
@@ -421,11 +454,13 @@ when the application closes a connection, kernel will send a RST
immediately and increase the TcpExtTCPAbortOnData counter.
* TcpExtTCPAbortOnClose
+
This counter means the application has unread data in the TCP layer when
the application wants to close the TCP connection. In such a situation,
kernel will send a RST to the other side of the TCP connection.
* TcpExtTCPAbortOnMemory
+
When an application closes a TCP connection, the kernel still needs to track
the connection and let it complete the TCP disconnect process. E.g. an
app calls the close method of a socket, the kernel sends a FIN to the other
@@ -447,10 +482,12 @@ the tcp_mem. Please refer the tcp_mem section in the `TCP man page`_:
* TcpExtTCPAbortOnTimeout
+
This counter will increase when any of the TCP timers expire. In such
a situation, the kernel won't send a RST; it just gives up the connection.
* TcpExtTCPAbortOnLinger
+
When a TCP connection comes into the FIN_WAIT_2 state, instead of waiting
for the FIN packet from the other side, the kernel could send a RST and
delete the socket immediately. This is not the default behavior of the
@@ -458,6 +495,7 @@ Linux kernel TCP stack. By configuring the TCP_LINGER2 socket option,
you could make the kernel follow this behavior.
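
A minimal sketch of that configuration, assuming a connected socket
fd (illustrative only; a negative TCP_LINGER2 value selects the
RST-on-close behavior described above)::

    #include <unistd.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    static void skip_fin_wait_2(int fd)
    {
        int val = -1; /* negative: don't wait in FIN_WAIT_2, send RST */

        setsockopt(fd, IPPROTO_TCP, TCP_LINGER2, &val, sizeof(val));
        close(fd);
    }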
* TcpExtTCPAbortFailed
+
The kernel TCP layer will send a RST if the `RFC2525 2.17 section`_
conditions are satisfied. If an internal error occurs during this process,
TcpExtTCPAbortFailed will be increased.
@@ -465,7 +503,7 @@ TcpExtTCPAbortFailed will be increased.
.. _RFC2525 2.17 section: https://tools.ietf.org/html/rfc2525#page-50
TCP Hybrid Slow Start
-====================
+=====================
The Hybrid Slow Start algorithm is an enhancement of the traditional
TCP congestion window Slow Start algorithm. It uses two pieces of
information to detect whether the max bandwidth of the TCP path is
@@ -481,23 +519,27 @@ relate with the Hybrid Slow Start algorithm.
.. _Hybrid Slow Start paper: https://pdfs.semanticscholar.org/25e9/ef3f03315782c7f1cbcd31b587857adae7d1.pdf
* TcpExtTCPHystartTrainDetect
+
How many times the ACK train length threshold is detected.
* TcpExtTCPHystartTrainCwnd
+
The sum of CWND detected by ACK train length. Dividing this value by
TcpExtTCPHystartTrainDetect gives the average CWND detected by the
ACK train length.
* TcpExtTCPHystartDelayDetect
+
How many times the packet delay threshold is detected.
* TcpExtTCPHystartDelayCwnd
+
The sum of CWND detected by packet delay. Dividing this value by
TcpExtTCPHystartDelayDetect gives the average CWND detected by the
packet delay.
TCP retransmission and congestion control
-======================================
+=========================================
The TCP protocol has two retransmission mechanisms: SACK and fast
recovery. They are mutually exclusive. When SACK is enabled,
the kernel TCP stack would use SACK; otherwise, the kernel would use fast
@@ -516,12 +558,14 @@ https://pdfs.semanticscholar.org/0e9c/968d09ab2e53e24c4dca5b2d67c7f7140f8e.pdf
.. _RFC6582: https://tools.ietf.org/html/rfc6582
* TcpExtTCPRenoRecovery and TcpExtTCPSackRecovery
+
When the congestion control comes into the Recovery state, if SACK is
used, TcpExtTCPSackRecovery increases 1; if SACK is not used,
TcpExtTCPRenoRecovery increases 1. These two counters mean the TCP
stack begins to retransmit the lost packets.
* TcpExtTCPSACKReneging
+
A packet was acknowledged by SACK, but the receiver has dropped this
packet, so the sender needs to retransmit this packet. In this
situation, the sender adds 1 to TcpExtTCPSACKReneging. A receiver
@@ -532,6 +576,7 @@ the RTO expires for this packet, then the sender assumes this packet
has been dropped by the receiver.
* TcpExtTCPRenoReorder
+
The reorder packet is detected by fast recovery. It would only be used
if SACK is disabled. The fast recovery algorithm detects reorder by
the duplicate ACK number. E.g., if retransmission is triggered, and
@@ -542,6 +587,7 @@ order packet. Thus the sender would find more ACKs than it
expected, and the sender knows that out of order delivery has occurred.
* TcpExtTCPTSReorder
+
The reorder packet is detected when a hole is filled. E.g., assume the
sender sends packet 1,2,3,4,5, and the receiving order is
1,2,4,5,3. When the sender receives the ACK of packet 3 (which will
@@ -551,6 +597,7 @@ fill the hole), two conditions will let TcpExtTCPTSReorder increase
than the retransmission timestamp.
* TcpExtTCPSACKReorder
+
The reorder packet detected by SACK. The SACK has two methods to
detect reorder: (1) DSACK is received by the sender. It means the
sender sends the same packet more than once. And the only reason
@@ -574,10 +621,12 @@ sender side.
.. _RFC2883 : https://tools.ietf.org/html/rfc2883
* TcpExtTCPDSACKOldSent
+
The TCP stack receives a duplicate packet which has been acked, so it
sends a DSACK to the sender.
* TcpExtTCPDSACKOfoSent
+
The TCP stack receives an out of order duplicate packet, so it sends a
DSACK to the sender.
@@ -586,6 +635,7 @@ The TCP stack receives a DSACK, which indicates an acknowledged
duplicate packet is received.
* TcpExtTCPDSACKOfoRecv
+
The TCP stack receives a DSACK, which indicates an out of order
duplicate packet is received.
@@ -640,23 +690,26 @@ A skb should be shifted or merged, but the TCP stack doesn't do it for
some reason.
TCP out of order
-===============
+================
* TcpExtTCPOFOQueue
+
The TCP layer receives an out of order packet and has enough memory
to queue it.
* TcpExtTCPOFODrop
+
The TCP layer receives an out of order packet but doesn't have enough
memory, so it drops the packet. Such packets won't be counted into
TcpExtTCPOFOQueue.
* TcpExtTCPOFOMerge
+
The received out of order packet has an overlap with the previous
packet. The overlapping part will be dropped. All of TcpExtTCPOFOMerge
packets will also be counted into TcpExtTCPOFOQueue.
TCP PAWS
-=======
+========
PAWS (Protection Against Wrapped Sequence numbers) is an algorithm
which is used to drop old packets. It depends on the TCP
timestamps. For detailed information, please refer to the `timestamp wiki`_
@@ -666,13 +719,15 @@ and the `RFC of PAWS`_.
.. _timestamp wiki: https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_timestamps
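
The core of the check can be sketched in a few lines of C; this is a
conceptual illustration of the PAWS idea, not the kernel's exact
implementation::

    #include <stdbool.h>
    #include <stdint.h>

    /* A segment whose timestamp is older than the most recent
     * in-window timestamp is treated as a stale, wrapped segment.
     * The signed subtraction handles timestamp wraparound.
     */
    static bool paws_reject(uint32_t rcv_tsval, uint32_t ts_recent)
    {
        return (int32_t)(rcv_tsval - ts_recent) < 0;
    }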
* TcpExtPAWSActive
+
Packets are dropped by PAWS in Syn-Sent status.
* TcpExtPAWSEstab
+
Packets are dropped by PAWS in any status other than Syn-Sent.
TCP ACK skip
-===========
+============
In some scenarios, the kernel would avoid sending duplicate ACKs too
frequently. Please find more details in the tcp_invalid_ratelimit
section of the `sysctl document`_. When the kernel decides to skip an ACK
@@ -684,6 +739,7 @@ it has no data.
.. _sysctl document: https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt
* TcpExtTCPACKSkippedSynRecv
+
The ACK is skipped in Syn-Recv status. The Syn-Recv status means the
TCP stack receives a SYN and replies with a SYN+ACK. Now the TCP stack is
waiting for an ACK. Generally, the TCP stack doesn't need to send ACK
@@ -697,6 +753,7 @@ increase TcpExtTCPACKSkippedSynRecv.
* TcpExtTCPACKSkippedPAWS
+
The ACK is skipped because the PAWS (Protect Against Wrapped Sequence
numbers) check fails. If the PAWS check fails in Syn-Recv, Fin-Wait-2
or Time-Wait statuses, the skipped ACK would be counted to
@@ -705,18 +762,22 @@ TcpExtTCPACKSkippedTimeWait. In all other statuses, the skipped ACK
would be counted to TcpExtTCPACKSkippedPAWS.
* TcpExtTCPACKSkippedSeq
+
The sequence number is out of window and the timestamp passes the PAWS
check, and the TCP status is not Syn-Recv, Fin-Wait-2, or Time-Wait.
* TcpExtTCPACKSkippedFinWait2
+
The ACK is skipped in Fin-Wait-2 status; the reason would be either a
PAWS check failure or the received sequence number being out of window.
* TcpExtTCPACKSkippedTimeWait
+
The ACK is skipped in Time-Wait status; the reason would be either a
PAWS check failure or the received sequence number being out of window.
* TcpExtTCPACKSkippedChallenge
+
The ACK is skipped if the ACK is a challenge ACK. RFC 5961 defines
3 kinds of challenge ACKs; please refer to `RFC 5961 section 3.2`_,
`RFC 5961 section 4.2`_ and `RFC 5961 section 5.2`_. Besides these
@@ -784,10 +845,10 @@ A TLP probe packet is sent.
A packet loss is detected and recovered by TLP.
examples
-=======
+========
ping test
---------
+---------
Run the ping command against the public DNS server 8.8.8.8::
nstatuser@nstat-a:~$ ping 8.8.8.8 -c 1
@@ -831,7 +892,7 @@ and its corresponding Echo Reply packet are constructed by:
So the IpExtInOctets and IpExtOutOctets are 20+16+48=84.
tcp 3-way handshake
-------------------
+-------------------
On the server side, we run::
nstatuser@nstat-b:~$ nc -lknv 0.0.0.0 9000
@@ -873,7 +934,7 @@ ACK, so client sent 2 packets, received 1 packet, TcpInSegs increased
1, TcpOutSegs increased 2.
TCP normal traffic
------------------
+------------------
Run nc on server::
nstatuser@nstat-b:~$ nc -lkv 0.0.0.0 9000
@@ -996,7 +1057,7 @@ and the packet received from client qualified for fast path, so it
was counted into 'TcpExtTCPHPHits'.
TcpExtTCPAbortOnClose
---------------------
+---------------------
On the server side, we run the python script below::
import socket
@@ -1030,7 +1091,7 @@ If we run tcpdump on the server side, we could find the server sent a
RST after we typed Ctrl-C.
TcpExtTCPAbortOnMemory and TcpExtTCPAbortOnTimeout
------------------------------------------------
+---------------------------------------------------
Below is an example which lets the orphan socket count become higher than
net.ipv4.tcp_max_orphans.
Change tcp_max_orphans to a smaller value on client::
@@ -1152,7 +1213,7 @@ FIN_WAIT_1 state finally. So we wait for a few minutes, we could find
TcpExtTCPAbortOnTimeout 10 0.0
TcpExtTCPAbortOnLinger
----------------------
+----------------------
The server side code::
nstatuser@nstat-b:~$ cat server_linger.py
@@ -1197,7 +1258,7 @@ After run client_linger.py, check the output of nstat::
TcpExtTCPAbortOnLinger 1 0.0
TcpExtTCPRcvCoalesce
--------------------
+--------------------
On the server, we run a program which listens on TCP port 9000, but
doesn't read any data::
@@ -1257,7 +1318,7 @@ the receiving queue. So the TCP layer merged the two packets, and we
could find the TcpExtTCPRcvCoalesce increased 1.
TcpExtListenOverflows and TcpExtListenDrops
-----------------------------------------
+-------------------------------------------
On the server, run the nc command, listening on port 9000::
nstatuser@nstat-b:~$ nc -lkv 0.0.0.0 9000
@@ -1305,7 +1366,7 @@ TcpExtListenOverflows and TcpExtListenDrops would be larger, because
the SYN of the 4th nc was dropped and the client was retrying.
IpInAddrErrors, IpExtInNoRoutes and IpOutNoRoutes
-----------------------------------------------
+-------------------------------------------------
server A IP address: 192.168.122.250
server B IP address: 192.168.122.251
Prepare on server A, add a route to server B::
@@ -1400,7 +1461,7 @@ a route for the 8.8.8.8 IP address, so server B increased
IpOutNoRoutes.
TcpExtTCPACKSkippedSynRecv
-------------------------
+--------------------------
In this test, we send 3 identical SYN packets from the client to the server. The
first SYN will let the server create a socket, set it to Syn-Recv status,
and reply with a SYN/ACK. The second SYN will let the server reply with the SYN/ACK
@@ -1448,7 +1509,7 @@ Check snmp counter on nstat-b::
As we expected, TcpExtTCPACKSkippedSynRecv is 1.
TcpExtTCPACKSkippedPAWS
-----------------------
+-----------------------
To trigger PAWS, we could send an old SYN.
On nstat-b, let nc listen on port 9000::
@@ -1485,7 +1546,7 @@ failed, the nstat-b replied an ACK for the first SYN, skipped the ACK
for the second SYN, and updated TcpExtTCPACKSkippedPAWS.
TcpExtTCPACKSkippedSeq
---------------------
+----------------------
To trigger TcpExtTCPACKSkippedSeq, we send packets which have a valid
timestamp (to pass the PAWS check) but a sequence number that is out of
window. The Linux TCP stack would avoid skipping if the packet has
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index 82236a17b5e6..f3244d87512a 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -196,7 +196,7 @@ The switch device will learn/forget source MAC address/VLAN on ingress packets
and notify the switch driver of the mac/vlan/port tuples. The switch driver,
in turn, will notify the bridge driver using the switchdev notifier call:
- err = call_switchdev_notifiers(val, dev, info);
+ err = call_switchdev_notifiers(val, dev, info, extack);
Where val is SWITCHDEV_FDB_ADD when learning and SWITCHDEV_FDB_DEL when
forgetting, and info points to a struct switchdev_notifier_fdb_info. On
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 2793d4eac55f..bc0680706870 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -291,6 +291,20 @@ user space is responsible for creating them if needed.
Default : 0 (for compatibility reasons)
+devconf_inherit_init_net
+----------------------------
+
+Controls if a new network namespace should inherit all current
+settings under /proc/sys/net/{ipv4,ipv6}/conf/{all,default}/. By
+default, we keep the current behavior: for IPv4 we inherit all current
+settings from init_net and for IPv6 we reset all settings to default.
+
+If set to 1, both IPv4 and IPv6 settings are forced to inherit from
+current ones in init_net. If set to 2, both IPv4 and IPv6 settings are
+forced to reset to their default values.
+
+Default : 0 (for compatibility reasons)
+
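
As a hedged sketch (assuming the parameter lives under
/proc/sys/net/core, as its placement in this file suggests), a
privileged process could select the "reset to defaults" policy before
creating new namespaces like this:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/net/core/devconf_inherit_init_net", "w");

        if (!f)
            return 1;
        fprintf(f, "2\n"); /* force IPv4/IPv6 conf back to defaults */
        return fclose(f) ? 1 : 0;
    }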
2. /proc/sys/net/unix - Parameters for Unix domain sockets
-------------------------------------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 2d3c1918f1b0..019a2bcfbd09 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4123,7 +4123,7 @@ S: Maintained
F: drivers/media/dvb-frontends/cxd2820r*
CXGB3 ETHERNET DRIVER (CXGB3)
-M: Arjun Vynipadath <arjun@chelsio.com>
+M: Vishal Kulkarni <vishal@chelsio.com>
L: netdev@vger.kernel.org
W: http://www.chelsio.com
S: Supported
@@ -4152,7 +4152,7 @@ S: Supported
F: drivers/crypto/chelsio
CXGB4 ETHERNET DRIVER (CXGB4)
-M: Arjun Vynipadath <arjun@chelsio.com>
+M: Vishal Kulkarni <vishal@chelsio.com>
L: netdev@vger.kernel.org
W: http://www.chelsio.com
S: Supported
@@ -6024,6 +6024,12 @@ L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/dma/fsldma.*
+FREESCALE ENETC ETHERNET DRIVERS
+M: Claudiu Manoil <claudiu.manoil@nxp.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/freescale/enetc/
+
FREESCALE eTSEC ETHERNET DRIVER (GIANFAR)
M: Claudiu Manoil <claudiu.manoil@nxp.com>
L: netdev@vger.kernel.org
@@ -6088,6 +6094,7 @@ M: Yangbo Lu <yangbo.lu@nxp.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/ptp/ptp_qoriq.c
+F: drivers/ptp/ptp_qoriq_debugfs.c
F: include/linux/fsl/ptp_qoriq.h
F: Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
@@ -10577,6 +10584,7 @@ F: Documentation/devicetree/bindings/net/dsa/
F: net/dsa/
F: include/net/dsa.h
F: include/linux/dsa/
+F: include/linux/platform_data/dsa.h
F: drivers/net/dsa/
NETWORKING [GENERAL]
@@ -12609,6 +12617,14 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/qualcomm/emac/
+QUALCOMM ETHQOS ETHERNET DRIVER
+M: Vinod Koul <vkoul@kernel.org>
+M: Niklas Cassel <niklas.cassel@linaro.org>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+F: Documentation/devicetree/bindings/net/qcom,dwmac.txt
+
QUALCOMM GENERIC INTERFACE I2C DRIVER
M: Alok Chauhan <alokc@codeaurora.org>
M: Karthikeyan Ramasubramanian <kramasub@codeaurora.org>
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 065fb372e355..b1c9b542c021 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -115,4 +115,6 @@
#define SO_TXTIME 61
#define SCM_TXTIME SO_TXTIME
+#define SO_BINDTOIFINDEX 62
+
#endif /* _UAPI_ASM_SOCKET_H */
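
The same constant is added to every architecture's socket.h below.
Its intended use mirrors SO_BINDTODEVICE but takes an interface index
instead of a name; a hedged user-space sketch (the helper name is
illustrative, and it assumes headers that already define
SO_BINDTOIFINDEX):

    #include <net/if.h>
    #include <sys/socket.h>

    static int bind_to_ifindex(int fd, const char *ifname)
    {
        int ifindex = if_nametoindex(ifname); /* e.g. "eth0" -> 2 */

        if (ifindex == 0)
            return -1;
        return setsockopt(fd, SOL_SOCKET, SO_BINDTOIFINDEX,
                          &ifindex, sizeof(ifindex));
    }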
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index ed0941292172..ad75959b99c1 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -706,6 +706,7 @@
fsl,tmr-fiper1 = <999999995>;
fsl,tmr-fiper2 = <99990>;
fsl,max-adj = <499999999>;
+ fsl,extts-fifo;
};
enet0: ethernet@2d10000 {
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 83a7ec4c16d0..c67f92bfa30e 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -20,7 +20,7 @@
#include <linux/delay.h>
#include <linux/clk-provider.h>
#include <linux/cpu.h>
-#include <net/dsa.h>
+#include <linux/platform_data/dsa.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/system_misc.h>
diff --git a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
index a3c1336d30c9..c65ab7db36ad 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
@@ -16,7 +16,7 @@
#include <linux/mtd/physmap.h>
#include <linux/mv643xx_eth.h>
#include <linux/ethtool.h>
-#include <net/dsa.h>
+#include <linux/platform_data/dsa.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
diff --git a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
index 252efe29bd1a..76b8138d9d79 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
@@ -17,7 +17,7 @@
#include <linux/mv643xx_eth.h>
#include <linux/ethtool.h>
#include <linux/i2c.h>
-#include <net/dsa.h>
+#include <linux/platform_data/dsa.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
diff --git a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
index f4f1dbe1d91d..5f388a1ed1e4 100644
--- a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
@@ -18,7 +18,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/ethtool.h>
-#include <net/dsa.h>
+#include <linux/platform_data/dsa.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
diff --git a/arch/arm/mach-orion5x/wnr854t-setup.c b/arch/arm/mach-orion5x/wnr854t-setup.c
index d162d4c7f85d..83589a28a491 100644
--- a/arch/arm/mach-orion5x/wnr854t-setup.c
+++ b/arch/arm/mach-orion5x/wnr854t-setup.c
@@ -15,7 +15,7 @@
#include <linux/mtd/physmap.h>
#include <linux/mv643xx_eth.h>
#include <linux/ethtool.h>
-#include <net/dsa.h>
+#include <linux/platform_data/dsa.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
index 9250bb2e429c..cea08d4a2597 100644
--- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c
+++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
@@ -18,7 +18,7 @@
#include <linux/leds.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
-#include <net/dsa.h>
+#include <linux/platform_data/dsa.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 25b3ee85066e..c8bfbbfdfcc3 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1083,12 +1083,17 @@ static inline void emit_ldx_r(const s8 dst[], const s8 src,
/* Arithmetic Operation */
static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
- const u8 rn, struct jit_ctx *ctx, u8 op) {
+ const u8 rn, struct jit_ctx *ctx, u8 op,
+ bool is_jmp64) {
switch (op) {
case BPF_JSET:
- emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
- emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
- emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
+ if (is_jmp64) {
+ emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
+ emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
+ emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
+ } else {
+ emit(ARM_ANDS_R(ARM_IP, rt, rn), ctx);
+ }
break;
case BPF_JEQ:
case BPF_JNE:
@@ -1096,18 +1101,25 @@ static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
case BPF_JGE:
case BPF_JLE:
case BPF_JLT:
- emit(ARM_CMP_R(rd, rm), ctx);
- _emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx);
+ if (is_jmp64) {
+ emit(ARM_CMP_R(rd, rm), ctx);
+				/* Only compare the low halves if the high halves are equal. */
+ _emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx);
+ } else {
+ emit(ARM_CMP_R(rt, rn), ctx);
+ }
break;
case BPF_JSLE:
case BPF_JSGT:
emit(ARM_CMP_R(rn, rt), ctx);
- emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx);
+ if (is_jmp64)
+ emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx);
break;
case BPF_JSLT:
case BPF_JSGE:
emit(ARM_CMP_R(rt, rn), ctx);
- emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx);
+ if (is_jmp64)
+ emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx);
break;
}
}
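
The semantics the new is_jmp64 flag selects, restated as plain C (a
behavioral sketch, not JIT code): a BPF_JMP comparison uses all 64
bits of each operand, a BPF_JMP32 comparison only the low 32 bits.
For JSET:

    #include <stdbool.h>
    #include <stdint.h>

    static bool jset_taken(uint64_t dst, uint64_t src, bool is_jmp64)
    {
        if (is_jmp64)
            return (dst & src) != 0;  /* needs both register halves */
        return ((uint32_t)dst & (uint32_t)src) != 0; /* low half only */
    }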
@@ -1615,6 +1627,17 @@ exit:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JSET | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
/* Setup source registers */
rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx);
rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
@@ -1641,6 +1664,17 @@ exit:
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
if (off == 0)
break;
rm = tmp2[0];
@@ -1652,7 +1686,8 @@ go_jmp:
rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Check for the condition */
- emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code));
+ emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code),
+ BPF_CLASS(code) == BPF_JMP);
/* Setup JUMP instruction */
jmp_offset = bpf2a32_offset(i+off, i, ctx);
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
index f4e58bcdaa43..13a05f759552 100644
--- a/arch/arm/net/bpf_jit_32.h
+++ b/arch/arm/net/bpf_jit_32.h
@@ -62,6 +62,7 @@
#define ARM_INST_ADDS_I 0x02900000
#define ARM_INST_AND_R 0x00000000
+#define ARM_INST_ANDS_R 0x00100000
#define ARM_INST_AND_I 0x02000000
#define ARM_INST_BIC_R 0x01c00000
@@ -172,6 +173,7 @@
#define ARM_ADC_I(rd, rn, imm) _AL3_I(ARM_INST_ADC, rd, rn, imm)
#define ARM_AND_R(rd, rn, rm) _AL3_R(ARM_INST_AND, rd, rn, rm)
+#define ARM_ANDS_R(rd, rn, rm) _AL3_R(ARM_INST_ANDS, rd, rn, rm)
#define ARM_AND_I(rd, rn, imm) _AL3_I(ARM_INST_AND, rd, rn, imm)
#define ARM_BIC_R(rd, rn, rm) _AL3_R(ARM_INST_BIC, rd, rn, rm)
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index a2399fd66e97..a6c81ce00f52 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -18,7 +18,7 @@
#include <linux/clkdev.h>
#include <linux/mv643xx_eth.h>
#include <linux/mv643xx_i2c.h>
-#include <net/dsa.h>
+#include <linux/platform_data/dsa.h>
#include <linux/platform_data/dma-mv_xor.h>
#include <linux/platform_data/usb-ehci-orion.h>
#include <plat/common.h>
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 1542df00b23c..aaddc0217e73 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -362,7 +362,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
const s16 off = insn->off;
const s32 imm = insn->imm;
const int i = insn - ctx->prog->insnsi;
- const bool is64 = BPF_CLASS(code) == BPF_ALU64;
+ const bool is64 = BPF_CLASS(code) == BPF_ALU64 ||
+ BPF_CLASS(code) == BPF_JMP;
const bool isdw = BPF_SIZE(code) == BPF_DW;
u8 jmp_cond;
s32 jmp_offset;
@@ -559,7 +560,17 @@ emit_bswap_uxt:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
- emit(A64_CMP(1, dst, src), ctx);
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
+ emit(A64_CMP(is64, dst, src), ctx);
emit_cond_jmp:
jmp_offset = bpf2a64_offset(i + off, i, ctx);
check_imm19(jmp_offset);
@@ -601,7 +612,8 @@ emit_cond_jmp:
emit(A64_B_(jmp_cond, jmp_offset), ctx);
break;
case BPF_JMP | BPF_JSET | BPF_X:
- emit(A64_TST(1, dst, src), ctx);
+ case BPF_JMP32 | BPF_JSET | BPF_X:
+ emit(A64_TST(is64, dst, src), ctx);
goto emit_cond_jmp;
/* IF (dst COND imm) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_K:
@@ -614,12 +626,23 @@ emit_cond_jmp:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
- emit_a64_mov_i(1, tmp, imm, ctx);
- emit(A64_CMP(1, dst, tmp), ctx);
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+ emit_a64_mov_i(is64, tmp, imm, ctx);
+ emit(A64_CMP(is64, dst, tmp), ctx);
goto emit_cond_jmp;
case BPF_JMP | BPF_JSET | BPF_K:
- emit_a64_mov_i(1, tmp, imm, ctx);
- emit(A64_TST(1, dst, tmp), ctx);
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ emit_a64_mov_i(is64, tmp, imm, ctx);
+ emit(A64_TST(is64, dst, tmp), ctx);
goto emit_cond_jmp;
/* function call */
case BPF_JMP | BPF_CALL:
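
For reference, a sketch of what one of these new JMP32 instructions
encodes, built directly as a struct bpf_insn (this assumes UAPI
headers that already define BPF_JMP32; the operands are arbitrary):

    #include <linux/bpf.h>

    /* if ((u32)r1 > 7) goto pc+2 */
    static const struct bpf_insn jmp32_example = {
        .code    = BPF_JMP32 | BPF_JGT | BPF_K,
        .dst_reg = BPF_REG_1,
        .off     = 2,
        .imm     = 7,
    };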
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index c872c4e6bafb..ba0d245f9576 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -117,4 +117,6 @@
#define SO_TXTIME 61
#define SCM_TXTIME SO_TXTIME
+#define SO_BINDTOIFINDEX 62
+
#endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 71370fb3ceef..73e25e35d803 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -126,4 +126,6 @@
#define SO_TXTIME 61
#define SCM_TXTIME SO_TXTIME
+#define SO_BINDTOIFINDEX 62
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 061b9cf2a779..52bed5976cbe 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -107,4 +107,6 @@
#define SO_TXTIME 0x4036
#define SCM_TXTIME SO_TXTIME
+#define SO_BINDTOIFINDEX 0x4037
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 19a8834e0398..f9513ad38fa6 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -337,6 +337,7 @@
#define PPC_INST_DIVWU 0x7c000396
#define PPC_INST_DIVD 0x7c0003d2
#define PPC_INST_RLWINM 0x54000000
+#define PPC_INST_RLWINM_DOT 0x54000001
#define PPC_INST_RLWIMI 0x50000000
#define PPC_INST_RLDICL 0x78000000
#define PPC_INST_RLDICR 0x78000004
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index c2d5192ed64f..549e9490ff2a 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -165,6 +165,10 @@
#define PPC_RLWINM(d, a, i, mb, me) EMIT(PPC_INST_RLWINM | ___PPC_RA(d) | \
___PPC_RS(a) | __PPC_SH(i) | \
__PPC_MB(mb) | __PPC_ME(me))
+#define PPC_RLWINM_DOT(d, a, i, mb, me) EMIT(PPC_INST_RLWINM_DOT | \
+ ___PPC_RA(d) | ___PPC_RS(a) | \
+ __PPC_SH(i) | __PPC_MB(mb) | \
+ __PPC_ME(me))
#define PPC_RLWIMI(d, a, i, mb, me) EMIT(PPC_INST_RLWIMI | ___PPC_RA(d) | \
___PPC_RS(a) | __PPC_SH(i) | \
__PPC_MB(mb) | __PPC_ME(me))
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 7ce57657d3b8..15bba765fa79 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -768,36 +768,58 @@ emit_clear:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
true_cond = COND_GT;
goto cond_branch;
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
true_cond = COND_LT;
goto cond_branch;
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
true_cond = COND_GE;
goto cond_branch;
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
true_cond = COND_LE;
goto cond_branch;
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
true_cond = COND_EQ;
goto cond_branch;
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
true_cond = COND_NE;
goto cond_branch;
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP | BPF_JSET | BPF_X:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_X:
true_cond = COND_NE;
/* Fall through */
@@ -809,18 +831,44 @@ cond_branch:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
/* unsigned comparison */
- PPC_CMPLD(dst_reg, src_reg);
+ if (BPF_CLASS(code) == BPF_JMP32)
+ PPC_CMPLW(dst_reg, src_reg);
+ else
+ PPC_CMPLD(dst_reg, src_reg);
break;
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
/* signed comparison */
- PPC_CMPD(dst_reg, src_reg);
+ if (BPF_CLASS(code) == BPF_JMP32)
+ PPC_CMPW(dst_reg, src_reg);
+ else
+ PPC_CMPD(dst_reg, src_reg);
break;
case BPF_JMP | BPF_JSET | BPF_X:
- PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
+ case BPF_JMP32 | BPF_JSET | BPF_X:
+ if (BPF_CLASS(code) == BPF_JMP) {
+ PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
+ src_reg);
+ } else {
+ int tmp_reg = b2p[TMP_REG_1];
+
+ PPC_AND(tmp_reg, dst_reg, src_reg);
+ PPC_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0,
+ 31);
+ }
break;
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_K:
@@ -828,43 +876,87 @@ cond_branch:
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ {
+ bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
+
/*
* Need sign-extended load, so only positive
* values can be used as imm in cmpldi
*/
- if (imm >= 0 && imm < 32768)
- PPC_CMPLDI(dst_reg, imm);
- else {
+ if (imm >= 0 && imm < 32768) {
+ if (is_jmp32)
+ PPC_CMPLWI(dst_reg, imm);
+ else
+ PPC_CMPLDI(dst_reg, imm);
+ } else {
/* sign-extending load */
PPC_LI32(b2p[TMP_REG_1], imm);
/* ... but unsigned comparison */
- PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
+ if (is_jmp32)
+ PPC_CMPLW(dst_reg,
+ b2p[TMP_REG_1]);
+ else
+ PPC_CMPLD(dst_reg,
+ b2p[TMP_REG_1]);
}
break;
+ }
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+ {
+ bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
+
/*
* signed comparison, so any 16-bit value
* can be used in cmpdi
*/
- if (imm >= -32768 && imm < 32768)
- PPC_CMPDI(dst_reg, imm);
- else {
+ if (imm >= -32768 && imm < 32768) {
+ if (is_jmp32)
+ PPC_CMPWI(dst_reg, imm);
+ else
+ PPC_CMPDI(dst_reg, imm);
+ } else {
PPC_LI32(b2p[TMP_REG_1], imm);
- PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
+ if (is_jmp32)
+ PPC_CMPW(dst_reg,
+ b2p[TMP_REG_1]);
+ else
+ PPC_CMPD(dst_reg,
+ b2p[TMP_REG_1]);
}
break;
+ }
case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
/* andi does not sign-extend the immediate */
if (imm >= 0 && imm < 32768)
/* PPC_ANDI is _only/always_ dot-form */
PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
else {
- PPC_LI32(b2p[TMP_REG_1], imm);
- PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
- b2p[TMP_REG_1]);
+ int tmp_reg = b2p[TMP_REG_1];
+
+ PPC_LI32(tmp_reg, imm);
+ if (BPF_CLASS(code) == BPF_JMP) {
+ PPC_AND_DOT(tmp_reg, dst_reg,
+ tmp_reg);
+ } else {
+ PPC_AND(tmp_reg, dst_reg,
+ tmp_reg);
+ PPC_RLWINM_DOT(tmp_reg, tmp_reg,
+ 0, 0, 31);
+ }
}
break;
}
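
The reason for the extra rotate on the JMP32 JSET paths: "and." sets
CR0 from the full 64-bit result, so the high word must be masked off
before the flags are usable for a 32-bit test. rlwinm. with MB=0,
ME=31 keeps only the low 32 bits and sets CR0 from what remains; as a
C sketch of the value being tested:

    #include <stdint.h>

    static int jset32_cr0_nonzero(uint64_t dst, uint64_t src)
    {
        uint64_t tmp = dst & src; /* PPC_AND */
        tmp &= 0xffffffffULL;     /* PPC_RLWINM_DOT, keep bits 0..31 */
        return tmp != 0;          /* what CR0's "not equal" bit sees */
    }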
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index 39d901476ee5..49c971587087 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -114,4 +114,6 @@
#define SO_TXTIME 61
#define SCM_TXTIME SO_TXTIME
+#define SO_BINDTOIFINDEX 62
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 3ff758eeb71d..ce9defdff62a 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1110,103 +1110,141 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
mask = 0xf000; /* j */
goto branch_oc;
case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
+ case BPF_JMP32 | BPF_JSGT | BPF_K: /* ((s32) dst > (s32) imm) */
mask = 0x2000; /* jh */
goto branch_ks;
case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */
+ case BPF_JMP32 | BPF_JSLT | BPF_K: /* ((s32) dst < (s32) imm) */
mask = 0x4000; /* jl */
goto branch_ks;
case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
+ case BPF_JMP32 | BPF_JSGE | BPF_K: /* ((s32) dst >= (s32) imm) */
mask = 0xa000; /* jhe */
goto branch_ks;
case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */
+ case BPF_JMP32 | BPF_JSLE | BPF_K: /* ((s32) dst <= (s32) imm) */
mask = 0xc000; /* jle */
goto branch_ks;
case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
+ case BPF_JMP32 | BPF_JGT | BPF_K: /* ((u32) dst_reg > (u32) imm) */
mask = 0x2000; /* jh */
goto branch_ku;
case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */
+ case BPF_JMP32 | BPF_JLT | BPF_K: /* ((u32) dst_reg < (u32) imm) */
mask = 0x4000; /* jl */
goto branch_ku;
case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
+ case BPF_JMP32 | BPF_JGE | BPF_K: /* ((u32) dst_reg >= (u32) imm) */
mask = 0xa000; /* jhe */
goto branch_ku;
case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */
+ case BPF_JMP32 | BPF_JLE | BPF_K: /* ((u32) dst_reg <= (u32) imm) */
mask = 0xc000; /* jle */
goto branch_ku;
case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
+ case BPF_JMP32 | BPF_JNE | BPF_K: /* ((u32) dst_reg != (u32) imm) */
mask = 0x7000; /* jne */
goto branch_ku;
case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */
+ case BPF_JMP32 | BPF_JEQ | BPF_K: /* ((u32) dst_reg == (u32) imm) */
mask = 0x8000; /* je */
goto branch_ku;
case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */
+ case BPF_JMP32 | BPF_JSET | BPF_K: /* ((u32) dst_reg & (u32) imm) */
mask = 0x7000; /* jnz */
- /* lgfi %w1,imm (load sign extend imm) */
- EMIT6_IMM(0xc0010000, REG_W1, imm);
- /* ngr %w1,%dst */
- EMIT4(0xb9800000, REG_W1, dst_reg);
+ if (BPF_CLASS(insn->code) == BPF_JMP32) {
+ /* llilf %w1,imm (load zero extend imm) */
+ EMIT6_IMM(0xc0010000, REG_W1, imm);
+ /* nr %w1,%dst */
+ EMIT2(0x1400, REG_W1, dst_reg);
+ } else {
+ /* lgfi %w1,imm (load sign extend imm) */
+ EMIT6_IMM(0xc0010000, REG_W1, imm);
+ /* ngr %w1,%dst */
+ EMIT4(0xb9800000, REG_W1, dst_reg);
+ }
goto branch_oc;
case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
+ case BPF_JMP32 | BPF_JSGT | BPF_X: /* ((s32) dst > (s32) src) */
mask = 0x2000; /* jh */
goto branch_xs;
case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */
+ case BPF_JMP32 | BPF_JSLT | BPF_X: /* ((s32) dst < (s32) src) */
mask = 0x4000; /* jl */
goto branch_xs;
case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
+ case BPF_JMP32 | BPF_JSGE | BPF_X: /* ((s32) dst >= (s32) src) */
mask = 0xa000; /* jhe */
goto branch_xs;
case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */
+ case BPF_JMP32 | BPF_JSLE | BPF_X: /* ((s32) dst <= (s32) src) */
mask = 0xc000; /* jle */
goto branch_xs;
case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
+ case BPF_JMP32 | BPF_JGT | BPF_X: /* ((u32) dst > (u32) src) */
mask = 0x2000; /* jh */
goto branch_xu;
case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */
+ case BPF_JMP32 | BPF_JLT | BPF_X: /* ((u32) dst < (u32) src) */
mask = 0x4000; /* jl */
goto branch_xu;
case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
+ case BPF_JMP32 | BPF_JGE | BPF_X: /* ((u32) dst >= (u32) src) */
mask = 0xa000; /* jhe */
goto branch_xu;
case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */
+ case BPF_JMP32 | BPF_JLE | BPF_X: /* ((u32) dst <= (u32) src) */
mask = 0xc000; /* jle */
goto branch_xu;
case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
+ case BPF_JMP32 | BPF_JNE | BPF_X: /* ((u32) dst != (u32) src) */
mask = 0x7000; /* jne */
goto branch_xu;
case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */
+ case BPF_JMP32 | BPF_JEQ | BPF_X: /* ((u32) dst == (u32) src) */
mask = 0x8000; /* je */
goto branch_xu;
case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */
+ case BPF_JMP32 | BPF_JSET | BPF_X: /* ((u32) dst & (u32) src) */
+ {
+ bool is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
+
mask = 0x7000; /* jnz */
- /* ngrk %w1,%dst,%src */
- EMIT4_RRF(0xb9e40000, REG_W1, dst_reg, src_reg);
+ /* nrk or ngrk %w1,%dst,%src */
+ EMIT4_RRF((is_jmp32 ? 0xb9f40000 : 0xb9e40000),
+ REG_W1, dst_reg, src_reg);
goto branch_oc;
branch_ks:
/* lgfi %w1,imm (load sign extend imm) */
EMIT6_IMM(0xc0010000, REG_W1, imm);
- /* cgrj %dst,%w1,mask,off */
- EMIT6_PCREL(0xec000000, 0x0064, dst_reg, REG_W1, i, off, mask);
+ /* crj or cgrj %dst,%w1,mask,off */
+ EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
+ dst_reg, REG_W1, i, off, mask);
break;
branch_ku:
/* lgfi %w1,imm (load sign extend imm) */
EMIT6_IMM(0xc0010000, REG_W1, imm);
- /* clgrj %dst,%w1,mask,off */
- EMIT6_PCREL(0xec000000, 0x0065, dst_reg, REG_W1, i, off, mask);
+ /* clrj or clgrj %dst,%w1,mask,off */
+ EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
+ dst_reg, REG_W1, i, off, mask);
break;
branch_xs:
- /* cgrj %dst,%src,mask,off */
- EMIT6_PCREL(0xec000000, 0x0064, dst_reg, src_reg, i, off, mask);
+ /* crj or cgrj %dst,%src,mask,off */
+ EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
+ dst_reg, src_reg, i, off, mask);
break;
branch_xu:
- /* clgrj %dst,%src,mask,off */
- EMIT6_PCREL(0xec000000, 0x0065, dst_reg, src_reg, i, off, mask);
+ /* clrj or clgrj %dst,%src,mask,off */
+ EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
+ dst_reg, src_reg, i, off, mask);
break;
branch_oc:
/* brc mask,jmp_off (branch instruction needs 4 bytes) */
jmp_off = addrs[i + off + 1] - (addrs[i + 1] - 4);
EMIT4_PCREL(0xa7040000 | mask << 8, jmp_off);
break;
+ }
default: /* too complex, give up */
pr_err("Unknown opcode %02x\n", insn->code);
return -1;
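
The pattern above reduces to a simple opcode-pair selection: each
64-bit compare-and-branch instruction has a 32-bit sibling, and
BPF_JMP32 just picks the sibling. A helper-style sketch of the table
(the constants are taken from the hunks above; the helper itself is
illustrative):

    #include <stdbool.h>

    static unsigned int cmp_branch_op(bool is_jmp32, bool is_signed)
    {
        if (is_signed)
            return is_jmp32 ? 0x0076 : 0x0064; /* crj  : cgrj  */
        return is_jmp32 ? 0x0077 : 0x0065;     /* clrj : clgrj */
    }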
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 7ea35e5601b6..bbdb81594dd4 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -104,6 +104,8 @@
#define SO_TXTIME 0x003f
#define SCM_TXTIME SO_TXTIME
+#define SO_BINDTOIFINDEX 0x0041
+
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001
#define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 5542303c43d9..afabf597c855 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -881,20 +881,41 @@ xadd: if (is_imm8(insn->off))
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
/* cmp dst_reg, src_reg */
- EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
- add_2reg(0xC0, dst_reg, src_reg));
+ if (BPF_CLASS(insn->code) == BPF_JMP)
+ EMIT1(add_2mod(0x48, dst_reg, src_reg));
+ else if (is_ereg(dst_reg) || is_ereg(src_reg))
+ EMIT1(add_2mod(0x40, dst_reg, src_reg));
+ EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
goto emit_cond_jmp;
case BPF_JMP | BPF_JSET | BPF_X:
+ case BPF_JMP32 | BPF_JSET | BPF_X:
/* test dst_reg, src_reg */
- EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
- add_2reg(0xC0, dst_reg, src_reg));
+ if (BPF_CLASS(insn->code) == BPF_JMP)
+ EMIT1(add_2mod(0x48, dst_reg, src_reg));
+ else if (is_ereg(dst_reg) || is_ereg(src_reg))
+ EMIT1(add_2mod(0x40, dst_reg, src_reg));
+ EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
goto emit_cond_jmp;
case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
/* test dst_reg, imm32 */
- EMIT1(add_1mod(0x48, dst_reg));
+ if (BPF_CLASS(insn->code) == BPF_JMP)
+ EMIT1(add_1mod(0x48, dst_reg));
+ else if (is_ereg(dst_reg))
+ EMIT1(add_1mod(0x40, dst_reg));
EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
goto emit_cond_jmp;
@@ -908,8 +929,21 @@ xadd: if (is_imm8(insn->off))
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
/* cmp dst_reg, imm8/32 */
- EMIT1(add_1mod(0x48, dst_reg));
+ if (BPF_CLASS(insn->code) == BPF_JMP)
+ EMIT1(add_1mod(0x48, dst_reg));
+ else if (is_ereg(dst_reg))
+ EMIT1(add_1mod(0x40, dst_reg));
if (is_imm8(imm32))
EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
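
The prefix rule applied in all three hunks, shown in isolation (a
sketch that ignores the register-extension bits add_1mod()/add_2mod()
fold into the prefix byte): 64-bit BPF_JMP operations always need
REX.W (0x48), while 32-bit BPF_JMP32 operations need a plain REX
(0x40) only to reach the extended registers r8-r15:

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns the number of prefix bytes emitted (0 or 1). */
    static int rex_prefix(bool is_jmp64, bool uses_ereg, uint8_t *out)
    {
        if (is_jmp64) {
            *out = 0x48; /* REX.W: 64-bit operand size */
            return 1;
        }
        if (uses_ereg) {
            *out = 0x40; /* plain REX, only for r8-r15 access */
            return 1;
        }
        return 0; /* 32-bit op on legacy registers: no prefix */
    }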
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index 8f6cc71e0848..0d9cdffce6ac 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -2072,7 +2072,18 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
- case BPF_JMP | BPF_JSGE | BPF_X: {
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_X: {
+ bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
u8 sreg_lo = sstk ? IA32_ECX : src_lo;
@@ -2081,25 +2092,35 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
- EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
- STACK_VAR(dst_hi));
+ if (is_jmp64)
+ EMIT3(0x8B,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EDX),
+ STACK_VAR(dst_hi));
}
if (sstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
STACK_VAR(src_lo));
- EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX),
- STACK_VAR(src_hi));
+ if (is_jmp64)
+ EMIT3(0x8B,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EBX),
+ STACK_VAR(src_hi));
}
- /* cmp dreg_hi,sreg_hi */
- EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
- EMIT2(IA32_JNE, 2);
+ if (is_jmp64) {
+ /* cmp dreg_hi,sreg_hi */
+ EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
+ EMIT2(IA32_JNE, 2);
+ }
/* cmp dreg_lo,sreg_lo */
EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
goto emit_cond_jmp;
}
- case BPF_JMP | BPF_JSET | BPF_X: {
+ case BPF_JMP | BPF_JSET | BPF_X:
+ case BPF_JMP32 | BPF_JSET | BPF_X: {
+ bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
u8 sreg_lo = sstk ? IA32_ECX : src_lo;
@@ -2108,15 +2129,21 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
- EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
- STACK_VAR(dst_hi));
+ if (is_jmp64)
+ EMIT3(0x8B,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EDX),
+ STACK_VAR(dst_hi));
}
if (sstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
STACK_VAR(src_lo));
- EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX),
- STACK_VAR(src_hi));
+ if (is_jmp64)
+ EMIT3(0x8B,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EBX),
+ STACK_VAR(src_hi));
}
/* and dreg_lo,sreg_lo */
EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo));
@@ -2126,32 +2153,39 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
goto emit_cond_jmp;
}
- case BPF_JMP | BPF_JSET | BPF_K: {
- u32 hi;
+ case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_K: {
+ bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
u8 sreg_lo = IA32_ECX;
u8 sreg_hi = IA32_EBX;
+ u32 hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
- EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
- STACK_VAR(dst_hi));
+ if (is_jmp64)
+ EMIT3(0x8B,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EDX),
+ STACK_VAR(dst_hi));
}
- hi = imm32 & (1<<31) ? (u32)~0 : 0;
/* mov ecx,imm32 */
- EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32);
- /* mov ebx,imm32 */
- EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi);
+ EMIT2_off32(0xC7, add_1reg(0xC0, sreg_lo), imm32);
/* and dreg_lo,sreg_lo */
EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo));
- /* and dreg_hi,sreg_hi */
- EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
- /* or dreg_lo,dreg_hi */
- EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
+ if (is_jmp64) {
+ hi = imm32 & (1 << 31) ? (u32)~0 : 0;
+ /* mov ebx,imm32 */
+ EMIT2_off32(0xC7, add_1reg(0xC0, sreg_hi), hi);
+ /* and dreg_hi,sreg_hi */
+ EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
+ /* or dreg_lo,dreg_hi */
+ EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
+ }
goto emit_cond_jmp;
}
case BPF_JMP | BPF_JEQ | BPF_K:
@@ -2163,29 +2197,44 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
- case BPF_JMP | BPF_JSGE | BPF_K: {
- u32 hi;
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_K: {
+ bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
u8 sreg_lo = IA32_ECX;
u8 sreg_hi = IA32_EBX;
+ u32 hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
- EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
- STACK_VAR(dst_hi));
+ if (is_jmp64)
+ EMIT3(0x8B,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EDX),
+ STACK_VAR(dst_hi));
}
- hi = imm32 & (1<<31) ? (u32)~0 : 0;
/* mov ecx,imm32 */
EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32);
- /* mov ebx,imm32 */
- EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi);
-
- /* cmp dreg_hi,sreg_hi */
- EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
- EMIT2(IA32_JNE, 2);
+ if (is_jmp64) {
+ hi = imm32 & (1 << 31) ? (u32)~0 : 0;
+ /* mov ebx,imm32 */
+ EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi);
+ /* cmp dreg_hi,sreg_hi */
+ EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
+ EMIT2(IA32_JNE, 2);
+ }
/* cmp dreg_lo,sreg_lo */
EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
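
The comparison strategy on a 32-bit host, restated as plain C (a
behavioral sketch, not JIT code): BPF_JMP compares the high words
first and falls through to the low words only on equality, while
BPF_JMP32 goes straight to the low words. For an unsigned JGT:

    #include <stdbool.h>
    #include <stdint.h>

    static bool jgt_taken(uint64_t dst, uint64_t src, bool is_jmp64)
    {
        uint32_t dlo = (uint32_t)dst, dhi = (uint32_t)(dst >> 32);
        uint32_t slo = (uint32_t)src, shi = (uint32_t)(src >> 32);

        if (is_jmp64 && dhi != shi)
            return dhi > shi; /* the high words already decide */
        return dlo > slo;     /* equal highs, or a JMP32 compare */
    }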
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
index 1de07a7f7680..b434217783d0 100644
--- a/arch/xtensa/include/uapi/asm/socket.h
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -119,4 +119,6 @@
#define SO_TXTIME 61
#define SCM_TXTIME SO_TXTIME
+#define SO_BINDTOIFINDEX 62
+
#endif /* _XTENSA_SOCKET_H */
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 59b75299fcbc..2e11b0d6a9bb 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -24,6 +24,7 @@
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/dst.h>
+#include <net/tls.h>
#include "chtls.h"
#include "chtls_cm.h"
@@ -1015,6 +1016,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
{
struct inet_sock *newinet;
const struct iphdr *iph;
+ struct tls_context *ctx;
struct net_device *ndev;
struct chtls_sock *csk;
struct dst_entry *dst;
@@ -1063,6 +1065,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
sk_setup_caps(newsk, dst);
+ ctx = tls_get_ctx(lsk);
+ newsk->sk_destruct = ctx->sk_destruct;
csk->sk = newsk;
csk->passive_reap_next = oreq;
csk->tx_chan = cxgb4_port_chan(ndev);
diff --git a/drivers/isdn/hisax/netjet.c b/drivers/isdn/hisax/netjet.c
index e932a152c405..d7b011c8d692 100644
--- a/drivers/isdn/hisax/netjet.c
+++ b/drivers/isdn/hisax/netjet.c
@@ -332,7 +332,7 @@ static int make_raw_data_56k(struct BCState *bcs) {
bitcnt = 0;
}
val >>= 1;
- };
+ }
fcs = PPP_INITFCS;
for (i = 0; i < bcs->tx_skb->len; i++) {
val = bcs->tx_skb->data[i];
@@ -415,7 +415,7 @@ static void read_raw(struct BCState *bcs, u_int *buf, int cnt) {
else { // it's 56K
mask = 0x7f;
bits = 7;
- };
+ }
for (i = 0; i < cnt; i++) {
val = bcs->channel ? ((*p >> 8) & 0xff) : (*p & 0xff);
p++;
@@ -623,7 +623,7 @@ void netjet_fill_dma(struct BCState *bcs)
else { // it's 56k
if (make_raw_data_56k(bcs))
return;
- };
+ }
if (bcs->cs->debug & L1_DEB_HSCX)
debugl1(bcs->cs, "tiger fill_dma2: c%d %4lx", bcs->channel,
bcs->Flag);
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index 298c8dba0321..6b8c3fbe3965 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -598,7 +598,7 @@ prcalling(char *dest, u_char *p)
dp += prbits(dp, *++p, 8, 8);
*dp++ = '\n';
l--;
- };
+ }
p++;
dp += sprintf(dp, " number digits ");
diff --git a/drivers/isdn/hisax/st5481.h b/drivers/isdn/hisax/st5481.h
index 8cd2d8277426..b421b86ca7da 100644
--- a/drivers/isdn/hisax/st5481.h
+++ b/drivers/isdn/hisax/st5481.h
@@ -512,7 +512,7 @@ static inline const char *ST5481_CMD_string(int evt)
case ST5481_CMD_AR10: return "AR10";
case ST5481_CMD_ARL: return "ARL";
case ST5481_CMD_PDN: return "PDN";
- };
+ }
sprintf(s, "0x%x", evt);
return s;
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index a4597e96c916..f4253d468ae1 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -72,7 +72,7 @@ isdnloop_bchan_send(isdnloop_card *card, int ch)
printk(KERN_WARNING "isdnloop: no rcard, skb dropped\n");
dev_kfree_skb(skb);
- };
+ }
cmd.command = ISDN_STAT_BSENT;
cmd.parm.length = len;
card->interface.statcallb(&cmd);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 7c46d9f4fefd..9274dcc6e9b0 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -31,6 +31,7 @@
#include <net/net_namespace.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
+#include <net/netlink.h>
/* General definitions */
#define AD_SHORT_TIMEOUT 1
@@ -851,6 +852,9 @@ static int ad_lacpdu_send(struct port *port)
if (!skb)
return -ENOMEM;
+ atomic64_inc(&SLAVE_AD_INFO(slave)->stats.lacpdu_tx);
+ atomic64_inc(&BOND_AD_INFO(slave->bond).stats.lacpdu_tx);
+
skb->dev = slave->dev;
skb_reset_mac_header(skb);
skb->network_header = skb->mac_header + ETH_HLEN;
@@ -892,6 +896,17 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
if (!skb)
return -ENOMEM;
+ switch (marker->tlv_type) {
+ case AD_MARKER_INFORMATION_SUBTYPE:
+ atomic64_inc(&SLAVE_AD_INFO(slave)->stats.marker_tx);
+ atomic64_inc(&BOND_AD_INFO(slave->bond).stats.marker_tx);
+ break;
+ case AD_MARKER_RESPONSE_SUBTYPE:
+ atomic64_inc(&SLAVE_AD_INFO(slave)->stats.marker_resp_tx);
+ atomic64_inc(&BOND_AD_INFO(slave->bond).stats.marker_resp_tx);
+ break;
+ }
+
skb_reserve(skb, 16);
skb->dev = slave->dev;
@@ -1086,6 +1101,10 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
*/
last_state = port->sm_rx_state;
+ if (lacpdu) {
+ atomic64_inc(&SLAVE_AD_INFO(port->slave)->stats.lacpdu_rx);
+ atomic64_inc(&BOND_AD_INFO(port->slave->bond).stats.lacpdu_rx);
+ }
/* check if state machine should change state */
/* first, check if port was reinitialized */
@@ -1922,6 +1941,9 @@ static void ad_marker_info_received(struct bond_marker *marker_info,
{
struct bond_marker marker;
+ atomic64_inc(&SLAVE_AD_INFO(port->slave)->stats.marker_rx);
+ atomic64_inc(&BOND_AD_INFO(port->slave->bond).stats.marker_rx);
+
/* copy the received marker data to the response marker */
memcpy(&marker, marker_info, sizeof(struct bond_marker));
/* change the marker subtype to marker response */
@@ -1946,6 +1968,9 @@ static void ad_marker_info_received(struct bond_marker *marker_info,
static void ad_marker_response_received(struct bond_marker *marker,
struct port *port)
{
+ atomic64_inc(&SLAVE_AD_INFO(port->slave)->stats.marker_resp_rx);
+ atomic64_inc(&BOND_AD_INFO(port->slave->bond).stats.marker_resp_rx);
+
/* DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW */
}
@@ -2348,66 +2373,68 @@ re_arm:
* bond_3ad_rx_indication - handle a received frame
* @lacpdu: received lacpdu
* @slave: slave struct to work on
- * @length: length of the data received
*
 * It is assumed that frames that were sent on this NIC don't return as new
 * received frames (loopback). Since only the payload is given to this
 * function, it checks for loopback.
*/
-static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
- u16 length)
+static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave)
{
- struct port *port;
+ struct bonding *bond = slave->bond;
int ret = RX_HANDLER_ANOTHER;
+ struct bond_marker *marker;
+ struct port *port;
+ atomic64_t *stat;
- if (length >= sizeof(struct lacpdu)) {
-
- port = &(SLAVE_AD_INFO(slave)->port);
-
- if (!port->slave) {
- net_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n",
- slave->dev->name, slave->bond->dev->name);
- return ret;
- }
+ port = &(SLAVE_AD_INFO(slave)->port);
+ if (!port->slave) {
+ net_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n",
+ slave->dev->name, slave->bond->dev->name);
+ return ret;
+ }
- switch (lacpdu->subtype) {
- case AD_TYPE_LACPDU:
- ret = RX_HANDLER_CONSUMED;
- netdev_dbg(slave->bond->dev,
- "Received LACPDU on port %d slave %s\n",
- port->actor_port_number,
- slave->dev->name);
- /* Protect against concurrent state machines */
- spin_lock(&slave->bond->mode_lock);
- ad_rx_machine(lacpdu, port);
- spin_unlock(&slave->bond->mode_lock);
+ switch (lacpdu->subtype) {
+ case AD_TYPE_LACPDU:
+ ret = RX_HANDLER_CONSUMED;
+ netdev_dbg(slave->bond->dev,
+ "Received LACPDU on port %d slave %s\n",
+ port->actor_port_number, slave->dev->name);
+ /* Protect against concurrent state machines */
+ spin_lock(&slave->bond->mode_lock);
+ ad_rx_machine(lacpdu, port);
+ spin_unlock(&slave->bond->mode_lock);
+ break;
+ case AD_TYPE_MARKER:
+ ret = RX_HANDLER_CONSUMED;
+ /* No need to convert fields to Little Endian since we
+ * don't use the marker's fields.
+ */
+ marker = (struct bond_marker *)lacpdu;
+ switch (marker->tlv_type) {
+ case AD_MARKER_INFORMATION_SUBTYPE:
+ netdev_dbg(slave->bond->dev, "Received Marker Information on port %d\n",
+ port->actor_port_number);
+ ad_marker_info_received(marker, port);
break;
-
- case AD_TYPE_MARKER:
- ret = RX_HANDLER_CONSUMED;
- /* No need to convert fields to Little Endian since we
- * don't use the marker's fields.
- */
-
- switch (((struct bond_marker *)lacpdu)->tlv_type) {
- case AD_MARKER_INFORMATION_SUBTYPE:
- netdev_dbg(slave->bond->dev, "Received Marker Information on port %d\n",
- port->actor_port_number);
- ad_marker_info_received((struct bond_marker *)lacpdu, port);
- break;
-
- case AD_MARKER_RESPONSE_SUBTYPE:
- netdev_dbg(slave->bond->dev, "Received Marker Response on port %d\n",
- port->actor_port_number);
- ad_marker_response_received((struct bond_marker *)lacpdu, port);
- break;
-
- default:
- netdev_dbg(slave->bond->dev, "Received an unknown Marker subtype on slot %d\n",
- port->actor_port_number);
- }
+ case AD_MARKER_RESPONSE_SUBTYPE:
+ netdev_dbg(slave->bond->dev, "Received Marker Response on port %d\n",
+ port->actor_port_number);
+ ad_marker_response_received(marker, port);
+ break;
+ default:
+ netdev_dbg(slave->bond->dev, "Received an unknown Marker subtype on slot %d\n",
+ port->actor_port_number);
+ stat = &SLAVE_AD_INFO(slave)->stats.marker_unknown_rx;
+ atomic64_inc(stat);
+ stat = &BOND_AD_INFO(bond).stats.marker_unknown_rx;
+ atomic64_inc(stat);
}
+ break;
+ default:
+ atomic64_inc(&SLAVE_AD_INFO(slave)->stats.lacpdu_unknown_rx);
+ atomic64_inc(&BOND_AD_INFO(bond).stats.lacpdu_unknown_rx);
}
+
return ret;
}
@@ -2643,10 +2670,13 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
return RX_HANDLER_ANOTHER;
lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu);
- if (!lacpdu)
+ if (!lacpdu) {
+ atomic64_inc(&SLAVE_AD_INFO(slave)->stats.lacpdu_illegal_rx);
+ atomic64_inc(&BOND_AD_INFO(bond).stats.lacpdu_illegal_rx);
return RX_HANDLER_ANOTHER;
+ }
- return bond_3ad_rx_indication(lacpdu, slave, skb->len);
+ return bond_3ad_rx_indication(lacpdu, slave);
}
/**
@@ -2678,3 +2708,61 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
}
spin_unlock_bh(&bond->mode_lock);
}
+
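+/* Worst-case netlink attribute space needed to dump one bond_3ad_stats;
+ * every nla_put_u64_64bit() in bond_3ad_stats_fill() below must be
+ * matched by one nla_total_size_64bit() here.
+ */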
+size_t bond_3ad_stats_size(void)
+{
+ return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */
+ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_TX */
+ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_UNKNOWN_RX */
+ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_ILLEGAL_RX */
+ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_RX */
+ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_TX */
+ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_RESP_RX */
+ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_RESP_TX */
+ nla_total_size_64bit(sizeof(u64)); /* BOND_3AD_STAT_MARKER_UNKNOWN_RX */
+}
+
+int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats)
+{
+ u64 val;
+
+ val = atomic64_read(&stats->lacpdu_rx);
+ if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_RX, val,
+ BOND_3AD_STAT_PAD))
+ return -EMSGSIZE;
+ val = atomic64_read(&stats->lacpdu_tx);
+ if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_TX, val,
+ BOND_3AD_STAT_PAD))
+ return -EMSGSIZE;
+ val = atomic64_read(&stats->lacpdu_unknown_rx);
+ if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_UNKNOWN_RX, val,
+ BOND_3AD_STAT_PAD))
+ return -EMSGSIZE;
+ val = atomic64_read(&stats->lacpdu_illegal_rx);
+ if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_ILLEGAL_RX, val,
+ BOND_3AD_STAT_PAD))
+ return -EMSGSIZE;
+
+ val = atomic64_read(&stats->marker_rx);
+ if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_RX, val,
+ BOND_3AD_STAT_PAD))
+ return -EMSGSIZE;
+ val = atomic64_read(&stats->marker_tx);
+ if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_TX, val,
+ BOND_3AD_STAT_PAD))
+ return -EMSGSIZE;
+ val = atomic64_read(&stats->marker_resp_rx);
+ if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_RESP_RX, val,
+ BOND_3AD_STAT_PAD))
+ return -EMSGSIZE;
+ val = atomic64_read(&stats->marker_resp_tx);
+ if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_RESP_TX, val,
+ BOND_3AD_STAT_PAD))
+ return -EMSGSIZE;
+ val = atomic64_read(&stats->marker_unknown_rx);
+ if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_UNKNOWN_RX, val,
+ BOND_3AD_STAT_PAD))
+ return -EMSGSIZE;
+
+ return 0;
+}
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 6b9ad8673218..b286f591242e 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -675,6 +675,71 @@ nla_put_failure:
return -EMSGSIZE;
}
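+
+/* RTM_GETSTATS hooks: ->get_linkxstats_size() reserves attribute space
+ * for IFLA_STATS_LINK_XSTATS{,_SLAVE} and ->fill_linkxstats() then
+ * emits the attributes into it.
+ */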
+static size_t bond_get_linkxstats_size(const struct net_device *dev, int attr)
+{
+ switch (attr) {
+ case IFLA_STATS_LINK_XSTATS:
+ case IFLA_STATS_LINK_XSTATS_SLAVE:
+ break;
+ default:
+ return 0;
+ }
+
+ return bond_3ad_stats_size() + nla_total_size(0);
+}
+
+static int bond_fill_linkxstats(struct sk_buff *skb,
+ const struct net_device *dev,
+ int *prividx, int attr)
+{
+ struct nlattr *nla __maybe_unused;
+ struct slave *slave = NULL;
+ struct nlattr *nest, *nest2;
+ struct bonding *bond;
+
+ switch (attr) {
+ case IFLA_STATS_LINK_XSTATS:
+ bond = netdev_priv(dev);
+ break;
+ case IFLA_STATS_LINK_XSTATS_SLAVE:
+ slave = bond_slave_get_rtnl(dev);
+ if (!slave)
+ return 0;
+ bond = slave->bond;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BOND);
+ if (!nest)
+ return -EMSGSIZE;
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+ struct bond_3ad_stats *stats;
+
+ if (slave)
+ stats = &SLAVE_AD_INFO(slave)->stats;
+ else
+ stats = &BOND_AD_INFO(bond).stats;
+
+ nest2 = nla_nest_start(skb, BOND_XSTATS_3AD);
+ if (!nest2) {
+ nla_nest_end(skb, nest);
+ return -EMSGSIZE;
+ }
+
+ if (bond_3ad_stats_fill(skb, stats)) {
+ nla_nest_cancel(skb, nest2);
+ nla_nest_end(skb, nest);
+ return -EMSGSIZE;
+ }
+ nla_nest_end(skb, nest2);
+ }
+ nla_nest_end(skb, nest);
+
+ return 0;
+}
+
struct rtnl_link_ops bond_link_ops __read_mostly = {
.kind = "bond",
.priv_size = sizeof(struct bonding),
@@ -689,6 +754,8 @@ struct rtnl_link_ops bond_link_ops __read_mostly = {
.get_num_tx_queues = bond_get_num_tx_queues,
.get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
as for TX queues */
+ .fill_linkxstats = bond_fill_linkxstats,
+ .get_linkxstats_size = bond_get_linkxstats_size,
.slave_maxtype = IFLA_BOND_SLAVE_MAX,
.slave_policy = bond_slave_policy,
.slave_changelink = bond_slave_changelink,
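
For context, a minimal userspace sketch (not part of the patch) of how the
new xstats could be read back once these hooks are registered. It assumes
uapi headers that already carry this series' LINK_XSTATS_TYPE_BOND and
BOND_3AD_STAT_* definitions; find_attr() is a local helper, "bond0" is a
placeholder, and all error handling is omitted:

#include <linux/if_bonding.h>
#include <linux/if_link.h>
#include <linux/rtnetlink.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static struct rtattr *find_attr(struct rtattr *rta, int rem, int type)
{
	for (; RTA_OK(rta, rem); rta = RTA_NEXT(rta, rem))
		if ((rta->rta_type & NLA_TYPE_MASK) == type)
			return rta;
	return NULL;
}

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct if_stats_msg ifsm;
	} req;
	struct rtattr *xs, *bond, *ad, *st;
	struct if_stats_msg *ifsm;
	struct nlmsghdr *nlh;
	char buf[8192];
	int fd, rem;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct if_stats_msg));
	req.nlh.nlmsg_type = RTM_GETSTATS;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.ifsm.ifindex = if_nametoindex("bond0");
	req.ifsm.filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_XSTATS);

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	send(fd, &req, req.nlh.nlmsg_len, 0);
	recv(fd, buf, sizeof(buf), 0);

	/* walk IFLA_STATS_LINK_XSTATS -> LINK_XSTATS_TYPE_BOND ->
	 * BOND_XSTATS_3AD, mirroring the nesting built by
	 * bond_fill_linkxstats() above
	 */
	nlh = (struct nlmsghdr *)buf;
	ifsm = NLMSG_DATA(nlh);
	rem = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifsm));
	xs = find_attr((struct rtattr *)(ifsm + 1), rem,
		       IFLA_STATS_LINK_XSTATS);
	bond = xs ? find_attr(RTA_DATA(xs), RTA_PAYLOAD(xs),
			      LINK_XSTATS_TYPE_BOND) : NULL;
	ad = bond ? find_attr(RTA_DATA(bond), RTA_PAYLOAD(bond),
			      BOND_XSTATS_3AD) : NULL;
	if (!ad)
		return 1;

	/* each stat is a u64; zero-length BOND_3AD_STAT_PAD attributes
	 * inserted for alignment fail the payload check and are skipped
	 */
	for (st = RTA_DATA(ad), rem = RTA_PAYLOAD(ad); RTA_OK(st, rem);
	     st = RTA_NEXT(st, rem))
		if (RTA_PAYLOAD(st) == sizeof(__u64))
			printf("3ad stat %d: %llu\n", st->rta_type,
			       (unsigned long long)*(__u64 *)RTA_DATA(st));
	return 0;
}

An iproute2 that knows about LINK_XSTATS_TYPE_BOND would render the same
attributes via "ip link xstats type bond".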
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 693a67f45bef..27d092cab40e 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1069,10 +1069,10 @@ static int gswip_probe(struct platform_device *pdev)
version = gswip_switch_r(priv, GSWIP_VERSION);
/* load the GPHY firmware if defined in the devicetree */
- gphy_fw_np = of_find_compatible_node(pdev->dev.of_node, NULL,
- "lantiq,gphy-fw");
+ gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
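+ /* of_get_compatible_child() returns the node with a reference held,
+  * so it must be dropped with of_node_put() once we are done with it.
+  */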
if (gphy_fw_np) {
err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
+ of_node_put(gphy_fw_np);
if (err) {
dev_err(dev, "gphy fw probe failed\n");
return err;
@@ -1080,13 +1080,12 @@ static int gswip_probe(struct platform_device *pdev)
}
/* bring up the mdio bus */
- mdio_np = of_find_compatible_node(pdev->dev.of_node, NULL,
- "lantiq,xrx200-mdio");
+ mdio_np = of_get_compatible_child(dev->of_node, "lantiq,xrx200-mdio");
if (mdio_np) {
err = gswip_mdio(priv, mdio_np);
if (err) {
dev_err(dev, "mdio probe failed\n");
- goto gphy_fw;
+ goto put_mdio_node;
}
}
@@ -1099,7 +1098,7 @@ static int gswip_probe(struct platform_device *pdev)
dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
priv->hw_info->cpu_port);
err = -EINVAL;
- goto mdio_bus;
+ goto disable_switch;
}
platform_set_drvdata(pdev, priv);
@@ -1109,10 +1108,14 @@ static int gswip_probe(struct platform_device *pdev)
(version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
return 0;
+disable_switch:
+ gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
+ dsa_unregister_switch(priv->ds);
mdio_bus:
if (mdio_np)
mdiobus_unregister(priv->ds->slave_mii_bus);
-gphy_fw:
+put_mdio_node:
+ of_node_put(mdio_np);
for (i = 0; i < priv->num_gphy_fw; i++)
gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
return err;
@@ -1123,16 +1126,15 @@ static int gswip_remove(struct platform_device *pdev)
struct gswip_priv *priv = platform_get_drvdata(pdev);
int i;
- if (!priv)
- return 0;
-
/* disable the switch */
gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
dsa_unregister_switch(priv->ds);
- if (priv->ds->slave_mii_bus)
+ if (priv->ds->slave_mii_bus) {
mdiobus_unregister(priv->ds->slave_mii_bus);
+ of_node_put(priv->ds->slave_mii_bus->dev.of_node);
+ }
for (i = 0; i < priv->num_gphy_fw; i++)
gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 7e97e620bd44..a4b6cda38016 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -420,7 +420,7 @@ qca8k_mib_init(struct qca8k_priv *priv)
static int
qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode)
{
- u32 reg;
+ u32 reg, val;
switch (port) {
case 0:
@@ -439,17 +439,9 @@ qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode)
*/
switch (mode) {
case PHY_INTERFACE_MODE_RGMII:
- qca8k_write(priv, reg,
- QCA8K_PORT_PAD_RGMII_EN |
- QCA8K_PORT_PAD_RGMII_TX_DELAY(3) |
- QCA8K_PORT_PAD_RGMII_RX_DELAY(3));
-
- /* According to the datasheet, RGMII delay is enabled through
- * PORT5_PAD_CTRL for all ports, rather than individual port
- * registers
- */
- qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
+ /* RGMII mode means no delay, so don't enable the delay */
+ val = QCA8K_PORT_PAD_RGMII_EN;
+ qca8k_write(priv, reg, val);
break;
case PHY_INTERFACE_MODE_SGMII:
qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index b223769d6a5e..3da97996bdf3 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -1266,12 +1266,14 @@ el3_up(struct net_device *dev)
pr_cont("Forcing 3c5x9b full-duplex mode");
break;
}
+ /* fall through */
case 8:
/* set full-duplex mode based on eeprom config setting */
if ((sw_info & 0x000f) && (sw_info & 0x8000)) {
pr_cont("Setting 3c5x9b full-duplex mode (from EEPROM configuration bit)");
break;
}
+ /* fall through */
default:
/* xcvr=(0 || 4) OR user has an old 3c5x9 non "B" model */
pr_cont("Setting 3c5x9/3c5x9B half-duplex mode");
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index e548c0ae2e00..145fe71fd155 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -435,7 +435,7 @@ static int amd8111e_restart(struct net_device *dev)
int i,reg_val;
/* stop the chip */
- writel(RUN, mmio + CMD0);
+ writel(RUN, mmio + CMD0);
if(amd8111e_init_ring(dev))
return -ENOMEM;
@@ -1720,7 +1720,7 @@ static void amd8111e_config_ipg(struct timer_list *t)
writew((u32)tmp_ipg, mmio + IPG);
writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
}
- mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
+ mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
return;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index bb41becb6609..c44e95255aa1 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -909,7 +909,7 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
(adapter->txd_write_ptr >> 2));
mmiowb();
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 03d131f777bc..6026b53137aa 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -32,7 +32,7 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.712.30-0"
+#define DRV_MODULE_VERSION "1.713.36-0"
#define DRV_MODULE_RELDATE "2014/02/10"
#define BNX2X_BC_VER 0x040200
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 46ee2c01f4c5..066765fbef06 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -449,7 +449,7 @@ static inline void bnx2x_init_fw_wrr(const struct cmng_init_input *input_data,
ccd[cos] =
(u32)input_data->cos_min_rate[cos] * 100 *
(T_FAIR_COEF / (8 * 100 * cosWeightSum));
- if (ccd[cos] < pdata->fair_vars.fair_threshold
+ if (ccd[cos] < pdata->fair_vars.fair_threshold
+ MIN_ABOVE_THRESH) {
ccd[cos] =
pdata->fair_vars.fair_threshold +
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 98d4c5a3ff21..d581d0ae6584 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -837,49 +837,45 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
switch (cos_entry) {
case 0:
- nig_reg_adress_crd_weight =
- (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
- pbf_reg_adress_crd_weight = (port) ?
- PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
- break;
+ nig_reg_adress_crd_weight =
+ (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
+ break;
case 1:
- nig_reg_adress_crd_weight = (port) ?
- NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
- pbf_reg_adress_crd_weight = (port) ?
- PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
- break;
+ nig_reg_adress_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
+ break;
case 2:
- nig_reg_adress_crd_weight = (port) ?
- NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
+ nig_reg_adress_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
- pbf_reg_adress_crd_weight = (port) ?
- PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
- break;
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
+ break;
case 3:
- if (port)
+ if (port)
return -EINVAL;
- nig_reg_adress_crd_weight =
- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
- pbf_reg_adress_crd_weight =
- PBF_REG_COS3_WEIGHT_P0;
- break;
+ nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
+ pbf_reg_adress_crd_weight = PBF_REG_COS3_WEIGHT_P0;
+ break;
case 4:
- if (port)
- return -EINVAL;
- nig_reg_adress_crd_weight =
- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
- pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
- break;
+ if (port)
+ return -EINVAL;
+ nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
+ pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
+ break;
case 5:
- if (port)
- return -EINVAL;
- nig_reg_adress_crd_weight =
- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
- pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
- break;
+ if (port)
+ return -EINVAL;
+ nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
+ pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
+ break;
}
REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig);
@@ -966,7 +962,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
if (pri >= max_num_of_cos) {
DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
"parameter Illegal strict priority\n");
- return -EINVAL;
+ return -EINVAL;
}
if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
@@ -1845,28 +1841,28 @@ static int bnx2x_emac_enable(struct link_params *params,
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
EMAC_TX_MODE_RESET);
- /* pause enable/disable */
- bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
- EMAC_RX_MODE_FLOW_EN);
+ /* pause enable/disable */
+ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
- bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
- (EMAC_TX_MODE_EXT_PAUSE_EN |
- EMAC_TX_MODE_FLOW_EN));
- if (!(params->feature_config_flags &
- FEATURE_CONFIG_PFC_ENABLED)) {
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
- bnx2x_bits_en(bp, emac_base +
- EMAC_REG_EMAC_RX_MODE,
- EMAC_RX_MODE_FLOW_EN);
-
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
- bnx2x_bits_en(bp, emac_base +
- EMAC_REG_EMAC_TX_MODE,
- (EMAC_TX_MODE_EXT_PAUSE_EN |
- EMAC_TX_MODE_FLOW_EN));
- } else
- bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
- EMAC_TX_MODE_FLOW_EN);
+ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) {
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ bnx2x_bits_en(bp, emac_base +
+ EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
+
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ bnx2x_bits_en(bp, emac_base +
+ EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ } else
+ bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+ EMAC_TX_MODE_FLOW_EN);
/* KEEP_VLAN_TAG, promiscuous */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
@@ -6339,7 +6335,7 @@ int bnx2x_set_led(struct link_params *params,
*/
if (!vars->link_up)
break;
- /* else: fall through */
+ /* fall through */
case LED_MODE_ON:
if (((params->phy[EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
@@ -6478,9 +6474,9 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
MDIO_REG_BANK_GP_STATUS,
MDIO_GP_STATUS_TOP_AN_STATUS1,
&gp_status);
- /* Link is up only if both local phy and external phy are up */
- if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
- return -ESRCH;
+ /* Link is up only if both local phy and external phy are up */
+ if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
+ return -ESRCH;
}
/* In XGXS loopback mode, do not check external PHY */
if (params->loopback_mode == LOOPBACK_XGXS)
@@ -7293,8 +7289,8 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
DP(NETIF_MSG_LINK,
"XAUI workaround has completed\n");
return 0;
- }
- usleep_range(3000, 6000);
+ }
+ usleep_range(3000, 6000);
}
break;
}
@@ -12675,39 +12671,39 @@ static void bnx2x_init_bmac_loopback(struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- vars->link_up = 1;
- vars->line_speed = SPEED_10000;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- vars->mac_type = MAC_TYPE_BMAC;
+ vars->link_up = 1;
+ vars->line_speed = SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_BMAC;
- vars->phy_flags = PHY_XGXS_FLAG;
+ vars->phy_flags = PHY_XGXS_FLAG;
- bnx2x_xgxs_deassert(params);
+ bnx2x_xgxs_deassert(params);
- /* Set bmac loopback */
- bnx2x_bmac_enable(params, vars, 1, 1);
+ /* Set bmac loopback */
+ bnx2x_bmac_enable(params, vars, 1, 1);
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
}
static void bnx2x_init_emac_loopback(struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- vars->link_up = 1;
- vars->line_speed = SPEED_1000;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- vars->mac_type = MAC_TYPE_EMAC;
+ vars->link_up = 1;
+ vars->line_speed = SPEED_1000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_EMAC;
- vars->phy_flags = PHY_XGXS_FLAG;
+ vars->phy_flags = PHY_XGXS_FLAG;
- bnx2x_xgxs_deassert(params);
- /* Set bmac loopback */
- bnx2x_emac_enable(params, vars, 1);
- bnx2x_emac_program(params, vars);
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+ bnx2x_xgxs_deassert(params);
+ /* Set bmac loopback */
+ bnx2x_emac_enable(params, vars, 1);
+ bnx2x_emac_program(params, vars);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
}
static void bnx2x_init_xmac_loopback(struct link_params *params,
@@ -13073,12 +13069,12 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
}
- if (!CHIP_IS_E3(bp)) {
- bnx2x_set_bmac_rx(bp, params->chip_id, port, 0);
- } else {
- bnx2x_set_xmac_rxtx(params, 0);
- bnx2x_set_umac_rxtx(params, 0);
- }
+ if (!CHIP_IS_E3(bp)) {
+ bnx2x_set_bmac_rx(bp, params->chip_id, port, 0);
+ } else {
+ bnx2x_set_xmac_rxtx(params, 0);
+ bnx2x_set_umac_rxtx(params, 0);
+ }
/* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 3b5b47e98c73..0cec82450e19 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -11298,7 +11298,7 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
dev_info.port_hw_config[port].external_phy_config),
SHMEM_RD(bp,
dev_info.port_hw_config[port].external_phy_config2));
- return;
+ return;
}
if (CHIP_IS_E3(bp))
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index a9eaaf3e73a4..7b22a6d8514c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2977,8 +2977,8 @@ static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
cmd_pos->data.macs_num--;
- DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
- cmd_pos->data.macs_num, cnt);
+ DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
+ cmd_pos->data.macs_num, cnt);
/* Break if we reached the maximum
* number of rules.
@@ -3597,8 +3597,8 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
/* RESTORE command will restore the entire multicast configuration */
case BNX2X_MCAST_CMD_RESTORE:
p->mcast_list_len = reg_sz;
- DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
- cmd, p->mcast_list_len);
+ DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
+ cmd, p->mcast_list_len);
break;
case BNX2X_MCAST_CMD_ADD:
@@ -3735,8 +3735,8 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1(
i++;
- DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
- cfg_data.mac);
+ DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+ cfg_data.mac);
}
*rdata_idx = i;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index c835f6c7ecd0..c97b642e6537 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2230,7 +2230,7 @@ int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
rc = bnx2x_vf_close(bp, vf);
if (rc)
goto op_err;
- /* Fallthrough to release resources */
+ /* Fall through - to release resources */
case VF_ACQUIRED:
DP(BNX2X_MSG_IOV, "about to free resources\n");
bnx2x_vf_free_resc(bp, vf);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index b1627dd5f2fd..328373e0578f 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -721,7 +721,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
case TG3_APE_LOCK_GPIO:
if (tg3_asic_rev(tp) == ASIC_REV_5761)
return 0;
- /* else: fall through */
+ /* fall through */
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
if (!tp->pci_fn)
@@ -782,7 +782,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
case TG3_APE_LOCK_GPIO:
if (tg3_asic_rev(tp) == ASIC_REV_5761)
return;
- /* else: fall through */
+ /* fall through */
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
if (!tp->pci_fn)
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index a36e38676640..cbc9175d74c5 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -788,9 +788,8 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
case IOCPF_E_INITFAIL:
del_timer(&ioc->iocpf_timer);
- /*
- * !!! fall through !!!
- */
+ /* fall through */
+
case IOCPF_E_TIMEOUT:
bfa_nw_ioc_hw_sem_release(ioc);
if (event == IOCPF_E_TIMEOUT)
@@ -858,9 +857,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
case IOCPF_E_FAIL:
del_timer(&ioc->iocpf_timer);
- /*
- * !!! fall through !!!
- */
+ /* fall through */
case IOCPF_E_TIMEOUT:
bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 825a28e5b544..e21bf3724611 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -661,7 +661,8 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
(((rh->r_dh.encap_on) &&
(rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
(!(rh->r_dh.encap_on) &&
- (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
+ ((rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED) ==
+ CNNIC_CSUM_VERIFIED))))
/* checksum has already been verified */
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 5701272aa7f7..ce28820c57c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -289,8 +289,7 @@ struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
if (clipt_size < CLIPT_MIN_HASH_BUCKETS)
return NULL;
- ctbl = kvzalloc(sizeof(*ctbl) +
- clipt_size*sizeof(struct list_head), GFP_KERNEL);
+ ctbl = kvzalloc(struct_size(ctbl, hash_list, clipt_size), GFP_KERNEL);
if (!ctbl)
return NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index d07230c892a5..796043544fc3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -446,8 +446,10 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
unsigned long *link_mode_mask)
{
#define SET_LMM(__lmm_name) \
- __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
- link_mode_mask)
+ do { \
+ __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
+ link_mode_mask); \
+ } while (0)
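+/* The do { } while (0) wrapper makes SET_LMM() expand to a single
+ * statement, so it composes safely with unbraced if/else, matching
+ * FW_CAPS_TO_LMM() below.
+ */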
#define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
do { \
@@ -541,7 +543,7 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
case FW_PORT_TYPE_CR4_QSFP:
SET_LMM(FIBRE);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
- FW_CAPS_TO_LMM(SPEED_10G, 10000baseSR_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full);
@@ -552,6 +554,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
break;
}
+ if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) {
+ FW_CAPS_TO_LMM(FEC_RS, FEC_RS);
+ FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER);
+ } else {
+ SET_LMM(FEC_NONE);
+ }
+
FW_CAPS_TO_LMM(ANEG, Autoneg);
FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
@@ -679,18 +688,15 @@ static int set_link_ksettings(struct net_device *dev,
base->autoneg == AUTONEG_DISABLE) {
fw_caps = speed_to_fw_caps(base->speed);
- /* Must only specify a single speed which must be supported
- * as part of the Physical Port Capabilities.
- */
- if ((fw_caps & (fw_caps - 1)) != 0 ||
- !(lc->pcaps & fw_caps))
+ /* Speed must be supported by Physical Port Capabilities. */
+ if (!(lc->pcaps & fw_caps))
return -EINVAL;
lc->speed_caps = fw_caps;
lc->acaps = fw_caps;
} else {
fw_caps =
- lmm_to_fw_caps(link_ksettings->link_modes.advertising);
+ lmm_to_fw_caps(link_ksettings->link_modes.advertising);
if (!(lc->pcaps & fw_caps))
return -EINVAL;
lc->speed_caps = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6ba9099ca7fe..73fc2479b4f4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5653,7 +5653,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_TSO | NETIF_F_TSO6;
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_HW_TLS_RECORD;
}
if (highdma)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index c7d2b4dc7568..02fc63fa7f25 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -444,8 +444,7 @@ struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
if (!max_tids)
return NULL;
- t = kvzalloc(sizeof(*t) +
- (max_tids * sizeof(struct cxgb4_link)), GFP_KERNEL);
+ t = kvzalloc(struct_size(t, table, max_tids), GFP_KERNEL);
if (!t)
return NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 4852febbfec3..1a407d3c1d67 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -646,7 +646,7 @@ struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
if (l2t_size < L2T_MIN_HASH_BUCKETS)
return NULL;
- d = kvzalloc(sizeof(*d) + l2t_size * sizeof(struct l2t_entry), GFP_KERNEL);
+ d = kvzalloc(struct_size(d, l2tab, l2t_size), GFP_KERNEL);
if (!d)
return NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 2b03f6187a24..c5e5466ee38b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -4105,6 +4105,9 @@ static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
* @mbox: the Firmware Mailbox to use
* @port: the Port ID
* @lc: the Port's Link Configuration
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ * @timeout: time to wait for command to finish before timing out
+ * (negative implies @sleep_ok=false)
*
* Set up a port's MAC and PHY according to a desired link configuration.
* - If the PHY can auto-negotiate first decide what to advertise, then
@@ -4124,6 +4127,7 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
int ret;
fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
+
/* Convert driver coding of Pause Frame Flow Control settings into the
* Firmware's API.
*/
@@ -4143,8 +4147,13 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
fw_fec = cc_to_fwcap_fec(cc_fec);
/* Figure out what our Requested Port Capabilities are going to be.
+ * Note parallel structure in t4_handle_get_port_info() and
+ * init_link_config().
*/
if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
+ if (lc->autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+
rcap = lc->acaps | fw_fc | fw_fec;
lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
lc->fec = cc_fec;
@@ -4156,7 +4165,11 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
}
- /* Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so
+ /* Some Requested Port Capabilities are trivially wrong if they exceed
+ * the Physical Port Capabilities. We can check that here and provide
+ * moderately useful feedback in the system log.
+ *
+ * Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so
* we need to exclude this from this check in order to maintain
* compatibility ...
*/
@@ -4185,6 +4198,13 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL,
sleep_ok, timeout);
+
+ /* Unfortunately, even if the Requested Port Capabilities "fit" within
+ * the Physical Port Capabilities, some combinations of features may
+ * still not be legal. For example, 40Gb/s and Reed-Solomon Forward
+ * Error Correction. So if the Firmware rejects the L1 Configure
+ * request, flag that here.
+ */
if (ret) {
dev_err(adapter->pdev_dev,
"Requested Port Capabilities %#x rejected, error %d\n",
@@ -8461,6 +8481,10 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
fc = fwcap_to_cc_pause(linkattr);
speed = fwcap_to_speed(linkattr);
+ /* Reset state for communicating new Transceiver Module status and
+ * whether the OS-dependent layer wants us to redo the current
+ * "sticky" L1 Configure Link Parameters.
+ */
lc->new_module = false;
lc->redo_l1cfg = false;
@@ -8497,9 +8521,15 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
*/
pi->port_type = port_type;
+ /* Record new Module Type information.
+ */
pi->mod_type = mod_type;
+ /* Let the OS-dependent layer know if we have a new
+ * Transceiver Module inserted.
+ */
lc->new_module = t4_is_inserted_mod_type(mod_type);
+
t4_os_portmod_changed(adapter, pi->port_id);
}
@@ -8507,8 +8537,10 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
fc != lc->fc || fec != lc->fec) { /* something changed */
if (!link_ok && lc->link_ok) {
lc->link_down_rc = linkdnrc;
- dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n",
- pi->tx_chan, t4_link_down_rc_str(linkdnrc));
+ dev_warn_ratelimited(adapter->pdev_dev,
+ "Port %d link down, reason: %s\n",
+ pi->tx_chan,
+ t4_link_down_rc_str(linkdnrc));
}
lc->link_ok = link_ok;
lc->speed = speed;
@@ -8518,6 +8550,11 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
lc->lpacaps = lpacaps;
lc->acaps = acaps & ADVERT_MASK;
+ /* If we're not physically capable of Auto-Negotiation, note
+ * this as Auto-Negotiation disabled. Otherwise, we track
+ * what Auto-Negotiation settings we have. Note parallel
+ * structure in t4_link_l1cfg_core() and init_link_config().
+ */
if (!(lc->acaps & FW_PORT_CAP32_ANEG)) {
lc->autoneg = AUTONEG_DISABLE;
} else if (lc->acaps & FW_PORT_CAP32_ANEG) {
@@ -8535,6 +8572,10 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
t4_os_link_changed(adapter, pi->port_id, link_ok);
}
+ /* If we have a new Transceiver Module and the OS-dependent code has
+ * told us that it wants us to redo whatever "sticky" L1 Configuration
+ * Link Parameters are set, do that now.
+ */
if (lc->new_module && lc->redo_l1cfg) {
struct link_config old_lc;
int ret;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 2fab87e86561..1fa24af8099f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -791,6 +791,13 @@ static int cxgb4vf_open(struct net_device *dev)
return err;
}
+ /* It's possible that the basic port information could have
+ * changed since we first read it.
+ */
+ err = t4vf_update_port_info(pi);
+ if (err < 0)
+ return err;
+
/*
* Note that this interface is up and start everything up ...
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 5b8c08cf523f..84dff74ca9cd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -2005,8 +2005,10 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
fc != lc->fc || fec != lc->fec) { /* something changed */
if (!link_ok && lc->link_ok) {
lc->link_down_rc = linkdnrc;
- dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n",
- pi->port_id, t4vf_link_down_rc_str(linkdnrc));
+ dev_warn_ratelimited(adapter->pdev_dev,
+ "Port %d link down, reason: %s\n",
+ pi->port_id,
+ t4vf_link_down_rc_str(linkdnrc));
}
lc->link_ok = link_ok;
lc->speed = speed;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 0a82fcf16d35..c2586f44c29d 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -395,6 +395,7 @@ static void dm9000_set_io(struct board_info *db, int byte_width)
case 3:
dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
+ /* fall through */
case 2:
db->dumpblk = dm9000_dumpblk_16bit;
db->outblk = dm9000_outblk_16bit;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index d5026909dec5..3c7c04406a2b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1270,10 +1270,6 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
#define is_arp_allowed_on_bmc(adapter, skb) \
(is_arp(skb) && is_arp_filt_enabled(adapter))
-#define is_broadcast_packet(eh, adapter) \
- (is_multicast_ether_addr(eh->h_dest) && \
- !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))
-
#define is_arp(skb) (skb->protocol == htons(ETH_P_ARP))
#define is_arp_filt_enabled(adapter) \
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index d3a62bc1f1c6..71793e03c3c8 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -97,5 +97,6 @@ config GIANFAR
source "drivers/net/ethernet/freescale/dpaa/Kconfig"
source "drivers/net/ethernet/freescale/dpaa2/Kconfig"
+source "drivers/net/ethernet/freescale/enetc/Kconfig"
endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 3b4ff08e3841..6a93293d31e0 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -23,3 +23,6 @@ obj-$(CONFIG_FSL_FMAN) += fman/
obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
+
+obj-$(CONFIG_FSL_ENETC) += enetc/
+obj-$(CONFIG_FSL_ENETC_VF) += enetc/
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
index 2f424e0a8225..d1e78cdd512f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Makefile
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
+fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
# Needed by the tracing framework
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
new file mode 100644
index 000000000000..a027f4a9d0cc
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2015 Freescale Semiconductor Inc.
+ * Copyright 2018-2019 NXP
+ */
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include "dpaa2-eth.h"
+#include "dpaa2-eth-debugfs.h"
+
+#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
+
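+/* Exposes per-CPU, per-frame-queue and per-channel counters under
+ * /sys/kernel/debug/dpaa2-eth/<netdev>/{cpu_stats,fq_stats,ch_stats}.
+ */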
+static struct dentry *dpaa2_dbg_root;
+
+static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
+{
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+ struct rtnl_link_stats64 *stats;
+ struct dpaa2_eth_drv_stats *extras;
+ int i;
+
+ seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n",
+ "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
+ "Tx SG", "Tx realloc", "Enq busy");
+
+ for_each_online_cpu(i) {
+ stats = per_cpu_ptr(priv->percpu_stats, i);
+ extras = per_cpu_ptr(priv->percpu_extras, i);
+ seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
+ i,
+ stats->rx_packets,
+ stats->rx_errors,
+ extras->rx_sg_frames,
+ stats->tx_packets,
+ stats->tx_errors,
+ extras->tx_conf_frames,
+ extras->tx_sg_frames,
+ extras->tx_reallocs,
+ extras->tx_portal_busy);
+ }
+
+ return 0;
+}
+
+static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
+{
+ int err;
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
+ err = single_open(file, dpaa2_dbg_cpu_show, priv);
+ if (err < 0)
+ netdev_err(priv->net_dev, "single_open() failed\n");
+
+ return err;
+}
+
+static const struct file_operations dpaa2_dbg_cpu_ops = {
+ .open = dpaa2_dbg_cpu_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
+{
+ switch (fq->type) {
+ case DPAA2_RX_FQ:
+ return "Rx";
+ case DPAA2_TX_CONF_FQ:
+ return "Tx conf";
+ default:
+ return "N/A";
+ }
+}
+
+static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
+{
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+ struct dpaa2_eth_fq *fq;
+ u32 fcnt, bcnt;
+ int i, err;
+
+ seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
+ seq_printf(file, "%s%16s%16s%16s%16s\n",
+ "VFQID", "CPU", "Type", "Frames", "Pending frames");
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
+ if (err)
+ fcnt = 0;
+
+ seq_printf(file, "%5d%16d%16s%16llu%16u\n",
+ fq->fqid,
+ fq->target_cpu,
+ fq_type_to_str(fq),
+ fq->stats.frames,
+ fcnt);
+ }
+
+ return 0;
+}
+
+static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
+{
+ int err;
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
+ err = single_open(file, dpaa2_dbg_fqs_show, priv);
+ if (err < 0)
+ netdev_err(priv->net_dev, "single_open() failed\n");
+
+ return err;
+}
+
+static const struct file_operations dpaa2_dbg_fq_ops = {
+ .open = dpaa2_dbg_fqs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
+{
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+ struct dpaa2_eth_channel *ch;
+ int i;
+
+ seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
+ seq_printf(file, "%s%16s%16s%16s%16s\n",
+ "CHID", "CPU", "Deq busy", "CDANs", "Buf count");
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ seq_printf(file, "%4d%16d%16llu%16llu%16d\n",
+ ch->ch_id,
+ ch->nctx.desired_cpu,
+ ch->stats.dequeue_portal_busy,
+ ch->stats.cdan,
+ ch->buf_count);
+ }
+
+ return 0;
+}
+
+static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
+{
+ int err;
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
+ err = single_open(file, dpaa2_dbg_ch_show, priv);
+ if (err < 0)
+ netdev_err(priv->net_dev, "single_open() failed\n");
+
+ return err;
+}
+
+static const struct file_operations dpaa2_dbg_ch_ops = {
+ .open = dpaa2_dbg_ch_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
+{
+ if (!dpaa2_dbg_root)
+ return;
+
+ /* Create a directory for the interface */
+ priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
+ dpaa2_dbg_root);
+ if (!priv->dbg.dir) {
+ netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
+ return;
+ }
+
+ /* per-cpu stats file */
+ priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
+ priv->dbg.dir, priv,
+ &dpaa2_dbg_cpu_ops);
+ if (!priv->dbg.cpu_stats) {
+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
+ goto err_cpu_stats;
+ }
+
+ /* per-fq stats file */
+ priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
+ priv->dbg.dir, priv,
+ &dpaa2_dbg_fq_ops);
+ if (!priv->dbg.fq_stats) {
+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
+ goto err_fq_stats;
+ }
+
+ /* per-channel stats file */
+ priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
+ priv->dbg.dir, priv,
+ &dpaa2_dbg_ch_ops);
+ if (!priv->dbg.ch_stats) {
+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
+ goto err_ch_stats;
+ }
+
+ return;
+
+err_ch_stats:
+ debugfs_remove(priv->dbg.fq_stats);
+err_fq_stats:
+ debugfs_remove(priv->dbg.cpu_stats);
+err_cpu_stats:
+ debugfs_remove(priv->dbg.dir);
+}
+
+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
+{
+ debugfs_remove(priv->dbg.fq_stats);
+ debugfs_remove(priv->dbg.ch_stats);
+ debugfs_remove(priv->dbg.cpu_stats);
+ debugfs_remove(priv->dbg.dir);
+}
+
+void dpaa2_eth_dbg_init(void)
+{
+ dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
+ if (!dpaa2_dbg_root) {
+ pr_err("DPAA2-ETH: debugfs create failed\n");
+ return;
+ }
+
+ pr_debug("DPAA2-ETH: debugfs created\n");
+}
+
+void dpaa2_eth_dbg_exit(void)
+{
+ debugfs_remove(dpaa2_dbg_root);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
new file mode 100644
index 000000000000..4f63de997a26
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2015 Freescale Semiconductor Inc.
+ * Copyright 2018-2019 NXP
+ */
+#ifndef DPAA2_ETH_DEBUGFS_H
+#define DPAA2_ETH_DEBUGFS_H
+
+#include <linux/dcache.h>
+
+struct dpaa2_eth_priv;
+
+struct dpaa2_debugfs {
+ struct dentry *dir;
+ struct dentry *fq_stats;
+ struct dentry *ch_stats;
+ struct dentry *cpu_stats;
+};
+
+#ifdef CONFIG_DEBUG_FS
+void dpaa2_eth_dbg_init(void);
+void dpaa2_eth_dbg_exit(void);
+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
+#else
+static inline void dpaa2_eth_dbg_init(void) {}
+static inline void dpaa2_eth_dbg_exit(void) {}
+static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
+static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* DPAA2_ETH_DEBUGFS_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 1ca9a18139ec..04925c731f0b 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1243,34 +1243,36 @@ enable_err:
return err;
}
-/* The DPIO store must be empty when we call this,
- * at the end of every NAPI cycle.
- */
-static u32 drain_channel(struct dpaa2_eth_channel *ch)
+/* Total number of in-flight frames on ingress queues */
+static u32 ingress_fq_count(struct dpaa2_eth_priv *priv)
{
- u32 drained = 0, total = 0;
+ struct dpaa2_eth_fq *fq;
+ u32 fcnt = 0, bcnt = 0, total = 0;
+ int i, err;
- do {
- pull_channel(ch);
- drained = consume_frames(ch, NULL);
- total += drained;
- } while (drained);
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
+ if (err) {
+ netdev_warn(priv->net_dev, "query_fq_count failed\n");
+ break;
+ }
+ total += fcnt;
+ }
return total;
}
-static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
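+/* Poll until the ingress FQs drain, giving up after ~1 second */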
+static void wait_for_fq_empty(struct dpaa2_eth_priv *priv)
{
- struct dpaa2_eth_channel *ch;
- int i;
- u32 drained = 0;
-
- for (i = 0; i < priv->num_channels; i++) {
- ch = priv->channel[i];
- drained += drain_channel(ch);
- }
+ int retries = 10;
+ u32 pending;
- return drained;
+ do {
+ pending = ingress_fq_count(priv);
+ if (pending)
+ msleep(100);
+ } while (pending && --retries);
}
static int dpaa2_eth_stop(struct net_device *net_dev)
@@ -1278,14 +1280,22 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
int dpni_enabled = 0;
int retries = 10;
- u32 drained;
netif_tx_stop_all_queues(net_dev);
netif_carrier_off(net_dev);
- /* Loop while dpni_disable() attempts to drain the egress FQs
- * and confirm them back to us.
+ /* On dpni_disable(), the MC firmware will:
+ * - stop MAC Rx and wait for all Rx frames to be enqueued to software
+ * - cut off WRIOP dequeues from egress FQs and wait until transmission
+ * of all in-flight Tx frames is finished (and the corresponding Tx conf
+ * frames are enqueued back to software)
+ *
+ * Before calling dpni_disable(), we wait for all Tx frames to arrive
+ * on WRIOP. After dpni_disable() finishes, wait until all remaining
+ * frames on the Rx and Tx conf queues are consumed by NAPI polling.
*/
+ msleep(500);
+
do {
dpni_disable(priv->mc_io, 0, priv->mc_token);
dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
@@ -1300,19 +1310,9 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
*/
}
- /* Wait for NAPI to complete on every core and disable it.
- * In particular, this will also prevent NAPI from being rescheduled if
- * a new CDAN is serviced, effectively discarding the CDAN. We therefore
- * don't even need to disarm the channels, except perhaps for the case
- * of a huge coalescing value.
- */
+ wait_for_fq_empty(priv);
disable_ch_napi(priv);
- /* Manually drain the Rx and TxConf queues */
- drained = drain_ingress_frames(priv);
- if (drained)
- netdev_dbg(net_dev, "Drained %d frames.\n", drained);
-
/* Empty the buffer pool */
drain_pool(priv);
@@ -3083,6 +3083,10 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
goto err_netdev_reg;
}
+#ifdef CONFIG_DEBUG_FS
+ dpaa2_dbg_add(priv);
+#endif
+
dev_info(dev, "Probed interface %s\n", net_dev->name);
return 0;
@@ -3126,6 +3130,9 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
net_dev = dev_get_drvdata(dev);
priv = netdev_priv(net_dev);
+#ifdef CONFIG_DEBUG_FS
+ dpaa2_dbg_remove(priv);
+#endif
unregister_netdev(net_dev);
if (priv->do_link_poll)
@@ -3170,4 +3177,25 @@ static struct fsl_mc_driver dpaa2_eth_driver = {
.match_id_table = dpaa2_eth_match_id_table
};
-module_fsl_mc_driver(dpaa2_eth_driver);
+static int __init dpaa2_eth_driver_init(void)
+{
+ int err;
+
+ dpaa2_eth_dbg_init();
+ err = fsl_mc_driver_register(&dpaa2_eth_driver);
+ if (err) {
+ dpaa2_eth_dbg_exit();
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit dpaa2_eth_driver_exit(void)
+{
+ dpaa2_eth_dbg_exit();
+ fsl_mc_driver_unregister(&dpaa2_eth_driver);
+}
+
+module_init(dpaa2_eth_driver_init);
+module_exit(dpaa2_eth_driver_exit);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 69c965de192b..31fe486ec25f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -16,6 +16,7 @@
#include "dpni-cmd.h"
#include "dpaa2-eth-trace.h"
+#include "dpaa2-eth-debugfs.h"
#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
@@ -365,6 +366,9 @@ struct dpaa2_eth_priv {
struct dpaa2_eth_cls_rule *cls_rules;
u8 rx_cls_enabled;
struct bpf_prog *xdp_prog;
+#ifdef CONFIG_DEBUG_FS
+ struct dpaa2_debugfs dbg;
+#endif
};
#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
@@ -405,6 +409,10 @@ static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
#define dpaa2_eth_fs_count(priv) \
((priv)->dpni_attrs.fs_entries)
+/* We have exactly one {Rx, Tx conf} queue per channel */
+#define dpaa2_eth_queue_count(priv) \
+ ((priv)->num_channels)
+
enum dpaa2_eth_rx_dist {
DPAA2_ETH_RX_DIST_HASH,
DPAA2_ETH_RX_DIST_CLS
@@ -447,12 +455,6 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
DPAA2_ETH_RX_HWA_SIZE;
}
-/* We have exactly one {Rx, Tx conf} queue per channel */
-static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
-{
- return priv->num_channels;
-}
-
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
int dpaa2_eth_cls_key_size(void);
int dpaa2_eth_cls_fld_off(int prot, int field);
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
new file mode 100644
index 000000000000..f9dd26fbfc83
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+config FSL_ENETC
+ tristate "ENETC PF driver"
+ depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ help
+ This driver supports NXP ENETC gigabit ethernet controller PCIe
+ physical function (PF) devices, managing ENETC Ports at a privileged
+ level.
+
+ If compiled as module (M), the module name is fsl-enetc.
+
+config FSL_ENETC_VF
+ tristate "ENETC VF driver"
+ depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ help
+ This driver supports NXP ENETC gigabit ethernet controller PCIe
+ virtual function (VF) devices enabled by the ENETC PF driver.
+
+ If compiled as module (M), the module name is fsl-enetc-vf.
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
new file mode 100644
index 000000000000..9529b01872ca
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
+fsl-enetc-$(CONFIG_FSL_ENETC) += enetc.o enetc_cbdr.o enetc_ethtool.o
+fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
+fsl-enetc-objs := enetc_pf.o $(fsl-enetc-y)
+
+obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
+
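+# When both drivers are built-in (yy), the shared core objects are
+# already linked into fsl-enetc above, so fsl-enetc-vf only adds its
+# own glue code to avoid duplicate symbols.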
+ifeq ($(CONFIG_FSL_ENETC)$(CONFIG_FSL_ENETC_VF), yy)
+fsl-enetc-vf-objs := enetc_vf.o
+else
+fsl-enetc-vf-$(CONFIG_FSL_ENETC_VF) += enetc.o enetc_cbdr.o \
+ enetc_ethtool.o
+fsl-enetc-vf-objs := enetc_vf.o $(fsl-enetc-vf-y)
+endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
new file mode 100644
index 000000000000..5bb9eb35d76d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -0,0 +1,1604 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include "enetc.h"
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/of_mdio.h>
+#include <linux/vmalloc.h>
+
+/* ENETC overhead: optional extension BD + 1 BD gap */
+#define ENETC_TXBDS_NEEDED(val) ((val) + 2)
+/* max # of chained Tx BDs is 15, including head and extension BD */
+#define ENETC_MAX_SKB_FRAGS 13
+#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
+
+static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb);
+
+netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_bdr *tx_ring;
+ int count;
+
+ tx_ring = priv->tx_ring[skb->queue_mapping];
+
+ if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+ if (unlikely(skb_linearize(skb)))
+ goto drop_packet_err;
+
+ count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
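+ /* stop the queue and ask the stack to requeue the skb if the
+  * worst-case number of BDs for this skb no longer fits the ring
+  */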
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
+
+ count = enetc_map_tx_buffs(tx_ring, skb);
+ if (unlikely(!count))
+ goto drop_packet_err;
+
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
+ netif_stop_subqueue(ndev, tx_ring->index);
+
+ return NETDEV_TX_OK;
+
+drop_packet_err:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd)
+{
+ int l3_start, l3_hsize;
+ u16 l3_flags, l4_flags;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return false;
+
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ l4_flags = ENETC_TXBD_L4_TCP;
+ break;
+ case offsetof(struct udphdr, check):
+ l4_flags = ENETC_TXBD_L4_UDP;
+ break;
+ default:
+ skb_checksum_help(skb);
+ return false;
+ }
+
+ l3_start = skb_network_offset(skb);
+ l3_hsize = skb_network_header_len(skb);
+
+ l3_flags = 0;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ l3_flags = ENETC_TXBD_L3_IPV6;
+
+ /* write BD fields */
+ txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags);
+ txbd->l4_csoff = l4_flags;
+
+ return true;
+}
+
+static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *tx_swbd)
+{
+ if (tx_swbd->is_dma_page)
+ dma_unmap_page(tx_ring->dev, tx_swbd->dma,
+ tx_swbd->len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(tx_ring->dev, tx_swbd->dma,
+ tx_swbd->len, DMA_TO_DEVICE);
+ tx_swbd->dma = 0;
+}
+
+static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *tx_swbd)
+{
+ if (tx_swbd->dma)
+ enetc_unmap_tx_buff(tx_ring, tx_swbd);
+
+ if (tx_swbd->skb) {
+ dev_kfree_skb_any(tx_swbd->skb);
+ tx_swbd->skb = NULL;
+ }
+}
+
+static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+{
+ struct enetc_tx_swbd *tx_swbd;
+ struct skb_frag_struct *frag;
+ int len = skb_headlen(skb);
+ union enetc_tx_bd temp_bd;
+ union enetc_tx_bd *txbd;
+ bool do_vlan, do_tstamp;
+ int i, count = 0;
+ unsigned int f;
+ dma_addr_t dma;
+ u8 flags = 0;
+
+ i = tx_ring->next_to_use;
+ txbd = ENETC_TXBD(*tx_ring, i);
+ prefetchw(txbd);
+
+ dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
+ goto dma_err;
+
+ temp_bd.addr = cpu_to_le64(dma);
+ temp_bd.buf_len = cpu_to_le16(len);
+ temp_bd.lstatus = 0;
+
+ tx_swbd = &tx_ring->tx_swbd[i];
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 0;
+ count++;
+
+ do_vlan = skb_vlan_tag_present(skb);
+ do_tstamp = skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
+
+ if (do_vlan || do_tstamp)
+ flags |= ENETC_TXBD_FLAGS_EX;
+
+ if (enetc_tx_csum(skb, &temp_bd))
+ flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
+
+ /* first BD needs frm_len and offload flags set */
+ temp_bd.frm_len = cpu_to_le16(skb->len);
+ temp_bd.flags = flags;
+
+ if (flags & ENETC_TXBD_FLAGS_EX) {
+ u8 e_flags = 0;
+ *txbd = temp_bd;
+ enetc_clear_tx_bd(&temp_bd);
+
+ /* add extension BD for VLAN and/or timestamping */
+ flags = 0;
+ tx_swbd++;
+ txbd++;
+ i++;
+ if (unlikely(i == tx_ring->bd_count)) {
+ i = 0;
+ tx_swbd = tx_ring->tx_swbd;
+ txbd = ENETC_TXBD(*tx_ring, 0);
+ }
+ prefetchw(txbd);
+
+ if (do_vlan) {
+ temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
+ temp_bd.ext.tpid = 0; /* 0 = C-TAG */
+ e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
+ }
+
+ if (do_tstamp) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
+ }
+
+ temp_bd.ext.e_flags = e_flags;
+ count++;
+ }
+
+ frag = &skb_shinfo(skb)->frags[0];
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_err;
+
+ *txbd = temp_bd;
+ enetc_clear_tx_bd(&temp_bd);
+
+ flags = 0;
+ tx_swbd++;
+ txbd++;
+ i++;
+ if (unlikely(i == tx_ring->bd_count)) {
+ i = 0;
+ tx_swbd = tx_ring->tx_swbd;
+ txbd = ENETC_TXBD(*tx_ring, 0);
+ }
+ prefetchw(txbd);
+
+ temp_bd.addr = cpu_to_le64(dma);
+ temp_bd.buf_len = cpu_to_le16(len);
+
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 1;
+ count++;
+ }
+
+ /* last BD needs 'F' bit set */
+ flags |= ENETC_TXBD_FLAGS_F;
+ temp_bd.flags = flags;
+ *txbd = temp_bd;
+
+ tx_ring->tx_swbd[i].skb = skb;
+
+ enetc_bdr_idx_inc(tx_ring, &i);
+ tx_ring->next_to_use = i;
+
+ /* let H/W know BD ring has been updated */
+ enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
+
+ return count;
+
+dma_err:
+ dev_err(tx_ring->dev, "DMA map error\n");
+
+ do {
+ tx_swbd = &tx_ring->tx_swbd[i];
+ enetc_free_tx_skb(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ } while (count--);
+
+ return 0;
+}
+
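+/* MSI-X handler: mask this vector's Rx and Tx interrupt sources and
+ * defer the actual ring processing to NAPI
+ */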
+static irqreturn_t enetc_msix(int irq, void *data)
+{
+ struct enetc_int_vector *v = data;
+ int i;
+
+ /* disable interrupts */
+ enetc_wr_reg(v->rbier, 0);
+
+ for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+ enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
+
+ napi_schedule_irqoff(&v->napi);
+
+ return IRQ_HANDLED;
+}
+
+static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
+static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+ struct napi_struct *napi, int work_limit);
+
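+/* NAPI poll: service all Tx rings and the Rx ring of this vector;
+ * interrupts are re-enabled only once all rings fit within the budget
+ */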
+static int enetc_poll(struct napi_struct *napi, int budget)
+{
+ struct enetc_int_vector
+ *v = container_of(napi, struct enetc_int_vector, napi);
+ bool complete = true;
+ int work_done;
+ int i;
+
+ for (i = 0; i < v->count_tx_rings; i++)
+ if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
+ complete = false;
+
+ work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
+ if (work_done == budget)
+ complete = false;
+
+ if (!complete)
+ return budget;
+
+ napi_complete_done(napi, work_done);
+
+ /* enable interrupts */
+ enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
+
+ for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+ enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
+ ENETC_TBIER_TXTIE);
+
+ return work_done;
+}
+
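+/* number of Tx BDs completed by h/w since the s/w consumer index ci,
+ * accounting for ring wrap-around
+ */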
+static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
+{
+ int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
+
+ return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
+}
+
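+/* reclaim completed Tx BDs and free their skbs; returns true if the
+ * ring was fully cleaned within the work budget, false otherwise
+ */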
+static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
+{
+ struct net_device *ndev = tx_ring->ndev;
+ int tx_frm_cnt = 0, tx_byte_cnt = 0;
+ struct enetc_tx_swbd *tx_swbd;
+ int i, bds_to_clean;
+
+ i = tx_ring->next_to_clean;
+ tx_swbd = &tx_ring->tx_swbd[i];
+ bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+
+ while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
+ bool is_eof = !!tx_swbd->skb;
+
+ enetc_unmap_tx_buff(tx_ring, tx_swbd);
+ if (is_eof) {
+ napi_consume_skb(tx_swbd->skb, napi_budget);
+ tx_swbd->skb = NULL;
+ }
+
+ tx_byte_cnt += tx_swbd->len;
+
+ bds_to_clean--;
+ tx_swbd++;
+ i++;
+ if (unlikely(i == tx_ring->bd_count)) {
+ i = 0;
+ tx_swbd = tx_ring->tx_swbd;
+ }
+
+ /* BD iteration loop end */
+ if (is_eof) {
+ tx_frm_cnt++;
+ /* re-arm interrupt source */
+ enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) |
+ BIT(16 + tx_ring->index));
+ }
+
+ if (unlikely(!bds_to_clean))
+ bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+ }
+
+ tx_ring->next_to_clean = i;
+ tx_ring->stats.packets += tx_frm_cnt;
+ tx_ring->stats.bytes += tx_byte_cnt;
+
+ if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
+ __netif_subqueue_stopped(ndev, tx_ring->index) &&
+ (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
+ netif_wake_subqueue(ndev, tx_ring->index);
+ }
+
+ return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
+}
+
+static bool enetc_new_page(struct enetc_bdr *rx_ring,
+ struct enetc_rx_swbd *rx_swbd)
+{
+ struct page *page;
+ dma_addr_t addr;
+
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return false;
+
+ addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
+ __free_page(page);
+
+ return false;
+ }
+
+ rx_swbd->dma = addr;
+ rx_swbd->page = page;
+ rx_swbd->page_offset = ENETC_RXB_PAD;
+
+ return true;
+}
+
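+/* hand up to buff_cnt fresh Rx buffers to h/w, reusing recycled pages
+ * where possible; returns the number of BDs actually refilled
+ */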
+static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
+{
+ struct enetc_rx_swbd *rx_swbd;
+ union enetc_rx_bd *rxbd;
+ int i, j;
+
+ i = rx_ring->next_to_use;
+ rx_swbd = &rx_ring->rx_swbd[i];
+ rxbd = ENETC_RXBD(*rx_ring, i);
+
+ for (j = 0; j < buff_cnt; j++) {
+ /* try reuse page */
+ if (unlikely(!rx_swbd->page)) {
+ if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
+ rx_ring->stats.rx_alloc_errs++;
+ break;
+ }
+ }
+
+ /* update RxBD */
+ rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
+ rx_swbd->page_offset);
+ /* clear 'R' as well */
+ rxbd->r.lstatus = 0;
+
+ rx_swbd++;
+ rxbd++;
+ i++;
+ if (unlikely(i == rx_ring->bd_count)) {
+ i = 0;
+ rx_swbd = rx_ring->rx_swbd;
+ rxbd = ENETC_RXBD(*rx_ring, 0);
+ }
+ }
+
+ if (likely(j)) {
+ rx_ring->next_to_alloc = i; /* keep track for page reuse */
+ rx_ring->next_to_use = i;
+ /* update ENETC's consumer index */
+ enetc_wr_reg(rx_ring->rcir, i);
+ }
+
+ return j;
+}
+
+static void enetc_get_offloads(struct enetc_bdr *rx_ring,
+ union enetc_rx_bd *rxbd, struct sk_buff *skb)
+{
+ /* TODO: add tstamp, hashing */
+ if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
+ u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
+
+ skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+
+ /* if h/w extracted a VLAN tag, copy it to the skb; for now we
+ * assume the standard TPID, but h/w also supports custom values
+ */
+ if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(rxbd->r.vlan_opt));
+}
+
+static void enetc_process_skb(struct enetc_bdr *rx_ring,
+ struct sk_buff *skb)
+{
+ skb_record_rx_queue(skb, rx_ring->index);
+ skb->protocol = eth_type_trans(skb, rx_ring->ndev);
+}
+
+static bool enetc_page_reusable(struct page *page)
+{
+ return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
+}
+
+static void enetc_reuse_page(struct enetc_bdr *rx_ring,
+ struct enetc_rx_swbd *old)
+{
+ struct enetc_rx_swbd *new;
+
+ new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
+
+ /* next buf that may reuse a page */
+ enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
+
+ /* copy page reference */
+ *new = *old;
+}
+
+static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
+ int i, u16 size)
+{
+ struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
+
+ dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
+ rx_swbd->page_offset,
+ size, DMA_FROM_DEVICE);
+ return rx_swbd;
+}
+
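+/* Rx buffers are page halves: flip to the other half for the next
+ * refill and take an extra reference, so the half just handed to the
+ * stack can be recycled once the stack drops its reference.
+ */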
+static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
+ struct enetc_rx_swbd *rx_swbd)
+{
+ if (likely(enetc_page_reusable(rx_swbd->page))) {
+ rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
+ page_ref_inc(rx_swbd->page);
+
+ enetc_reuse_page(rx_ring, rx_swbd);
+
+ /* sync for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
+ rx_swbd->page_offset,
+ ENETC_RXB_DMA_SIZE,
+ DMA_FROM_DEVICE);
+ } else {
+ dma_unmap_page(rx_ring->dev, rx_swbd->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+ rx_swbd->page = NULL;
+}
+
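+/* build the skb head directly around the buffer's page half (headroom
+ * included), avoiding a copy of the frame contents
+ */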
+static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
+ int i, u16 size)
+{
+ struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+ struct sk_buff *skb;
+ void *ba;
+
+ ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
+ skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
+ if (unlikely(!skb)) {
+ rx_ring->stats.rx_alloc_errs++;
+ return NULL;
+ }
+
+ skb_reserve(skb, ENETC_RXB_PAD);
+ __skb_put(skb, size);
+
+ enetc_put_rx_buff(rx_ring, rx_swbd);
+
+ return skb;
+}
+
+static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
+ u16 size, struct sk_buff *skb)
+{
+ struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
+ rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
+
+ enetc_put_rx_buff(rx_ring, rx_swbd);
+}
+
+#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
+
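+/* Rx fast path: process up to work_limit frames, refilling the ring
+ * with fresh buffers in bundles of ENETC_RXBD_BUNDLE BDs.
+ */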
+static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+ struct napi_struct *napi, int work_limit)
+{
+ int rx_frm_cnt = 0, rx_byte_cnt = 0;
+ int cleaned_cnt, i;
+
+ cleaned_cnt = enetc_bd_unused(rx_ring);
+ /* next descriptor to process */
+ i = rx_ring->next_to_clean;
+
+ while (likely(rx_frm_cnt < work_limit)) {
+ union enetc_rx_bd *rxbd;
+ struct sk_buff *skb;
+ u32 bd_status;
+ u16 size;
+
+ if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
+ int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
+
+ cleaned_cnt -= count;
+ }
+
+ rxbd = ENETC_RXBD(*rx_ring, i);
+ bd_status = le32_to_cpu(rxbd->r.lstatus);
+ if (!bd_status)
+ break;
+
+ enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index));
+ dma_rmb(); /* for reading other rxbd fields */
+ size = le16_to_cpu(rxbd->r.buf_len);
+ skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
+ if (!skb)
+ break;
+
+ enetc_get_offloads(rx_ring, rxbd, skb);
+
+ cleaned_cnt++;
+ rxbd++;
+ i++;
+ if (unlikely(i == rx_ring->bd_count)) {
+ i = 0;
+ rxbd = ENETC_RXBD(*rx_ring, 0);
+ }
+
+ if (unlikely(bd_status &
+ ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
+ dev_kfree_skb(skb);
+ while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+ dma_rmb();
+ bd_status = le32_to_cpu(rxbd->r.lstatus);
+ rxbd++;
+ i++;
+ if (unlikely(i == rx_ring->bd_count)) {
+ i = 0;
+ rxbd = ENETC_RXBD(*rx_ring, 0);
+ }
+ }
+
+ rx_ring->ndev->stats.rx_dropped++;
+ rx_ring->ndev->stats.rx_errors++;
+
+ break;
+ }
+
+ /* not last BD in frame? */
+ while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+ bd_status = le32_to_cpu(rxbd->r.lstatus);
+ size = ENETC_RXB_DMA_SIZE;
+
+ if (bd_status & ENETC_RXBD_LSTATUS_F) {
+ dma_rmb();
+ size = le16_to_cpu(rxbd->r.buf_len);
+ }
+
+ enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
+
+ cleaned_cnt++;
+ rxbd++;
+ i++;
+ if (unlikely(i == rx_ring->bd_count)) {
+ i = 0;
+ rxbd = ENETC_RXBD(*rx_ring, 0);
+ }
+ }
+
+ rx_byte_cnt += skb->len;
+
+ enetc_process_skb(rx_ring, skb);
+
+ napi_gro_receive(napi, skb);
+
+ rx_frm_cnt++;
+ }
+
+ rx_ring->next_to_clean = i;
+
+ rx_ring->stats.packets += rx_frm_cnt;
+ rx_ring->stats.bytes += rx_byte_cnt;
+
+ return rx_frm_cnt;
+}
+
+/* Probing and Init */
+#define ENETC_MAX_RFS_SIZE 64
+void enetc_get_si_caps(struct enetc_si *si)
+{
+ struct enetc_hw *hw = &si->hw;
+ u32 val;
+
+ /* find out how many of various resources we have to work with */
+ val = enetc_rd(hw, ENETC_SICAPR0);
+ si->num_rx_rings = (val >> 16) & 0xff;
+ si->num_tx_rings = val & 0xff;
+
+ val = enetc_rd(hw, ENETC_SIRFSCAPR);
+ si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
+ si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
+
+ si->num_rss = 0;
+ val = enetc_rd(hw, ENETC_SIPCAPR0);
+ if (val & ENETC_SIPCAPR0_RSS) {
+ val = enetc_rd(hw, ENETC_SIRSSCAPR);
+ si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(val);
+ }
+}
+
+static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
+{
+ r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
+ &r->bd_dma_base, GFP_KERNEL);
+ if (!r->bd_base)
+ return -ENOMEM;
+
+ /* h/w requires 128B alignment */
+ if (!IS_ALIGNED(r->bd_dma_base, 128)) {
+ dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
+ r->bd_dma_base);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int enetc_alloc_txbdr(struct enetc_bdr *txr)
+{
+ int err;
+
+ txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
+ if (!txr->tx_swbd)
+ return -ENOMEM;
+
+ err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
+ if (err) {
+ vfree(txr->tx_swbd);
+ return err;
+ }
+
+ txr->next_to_clean = 0;
+ txr->next_to_use = 0;
+
+ return 0;
+}
+
+static void enetc_free_txbdr(struct enetc_bdr *txr)
+{
+ int size, i;
+
+ for (i = 0; i < txr->bd_count; i++)
+ enetc_free_tx_skb(txr, &txr->tx_swbd[i]);
+
+ size = txr->bd_count * sizeof(union enetc_tx_bd);
+
+ dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
+ txr->bd_base = NULL;
+
+ vfree(txr->tx_swbd);
+ txr->tx_swbd = NULL;
+}
+
+static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
+{
+ int i, err;
+
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ err = enetc_alloc_txbdr(priv->tx_ring[i]);
+
+ if (err)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ while (i-- > 0)
+ enetc_free_txbdr(priv->tx_ring[i]);
+
+ return err;
+}
+
+static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_free_txbdr(priv->tx_ring[i]);
+}
+
+static int enetc_alloc_rxbdr(struct enetc_bdr *rxr)
+{
+ int err;
+
+ rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
+ if (!rxr->rx_swbd)
+ return -ENOMEM;
+
+ err = enetc_dma_alloc_bdr(rxr, sizeof(union enetc_rx_bd));
+ if (err) {
+ vfree(rxr->rx_swbd);
+ return err;
+ }
+
+ rxr->next_to_clean = 0;
+ rxr->next_to_use = 0;
+ rxr->next_to_alloc = 0;
+
+ return 0;
+}
+
+static void enetc_free_rxbdr(struct enetc_bdr *rxr)
+{
+ int size;
+
+ size = rxr->bd_count * sizeof(union enetc_rx_bd);
+
+ dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
+ rxr->bd_base = NULL;
+
+ vfree(rxr->rx_swbd);
+ rxr->rx_swbd = NULL;
+}
+
+static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
+{
+ int i, err;
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ err = enetc_alloc_rxbdr(priv->rx_ring[i]);
+
+ if (err)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ while (i-- > 0)
+ enetc_free_rxbdr(priv->rx_ring[i]);
+
+ return err;
+}
+
+static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_free_rxbdr(priv->rx_ring[i]);
+}
+
+static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
+{
+ int i;
+
+ if (!tx_ring->tx_swbd)
+ return;
+
+ for (i = 0; i < tx_ring->bd_count; i++) {
+ struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
+
+ enetc_free_tx_skb(tx_ring, tx_swbd);
+ }
+
+ tx_ring->next_to_clean = 0;
+ tx_ring->next_to_use = 0;
+}
+
+static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
+{
+ int i;
+
+ if (!rx_ring->rx_swbd)
+ return;
+
+ for (i = 0; i < rx_ring->bd_count; i++) {
+ struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
+
+ if (!rx_swbd->page)
+ continue;
+
+ dma_unmap_page(rx_ring->dev, rx_swbd->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_page(rx_swbd->page);
+ rx_swbd->page = NULL;
+ }
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ rx_ring->next_to_alloc = 0;
+}
+
+static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_free_rx_ring(priv->rx_ring[i]);
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_free_tx_ring(priv->tx_ring[i]);
+}
+
+static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
+{
+ int size = cbdr->bd_count * sizeof(struct enetc_cbd);
+
+ cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
+ GFP_KERNEL);
+ if (!cbdr->bd_base)
+ return -ENOMEM;
+
+ /* h/w requires 128B alignment */
+ if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
+ dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
+ return -EINVAL;
+ }
+
+ cbdr->next_to_clean = 0;
+ cbdr->next_to_use = 0;
+
+ return 0;
+}
+
+static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
+{
+ int size = cbdr->bd_count * sizeof(struct enetc_cbd);
+
+ dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
+ cbdr->bd_base = NULL;
+}
+
+static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
+{
+ /* set CBDR cache attributes */
+ enetc_wr(hw, ENETC_SICAR2,
+ ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+
+ enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
+ enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
+ enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
+
+ enetc_wr(hw, ENETC_SICBDRPIR, 0);
+ enetc_wr(hw, ENETC_SICBDRCIR, 0);
+
+ /* enable ring */
+ enetc_wr(hw, ENETC_SICBDRMR, BIT(31));
+
+ cbdr->pir = hw->reg + ENETC_SICBDRPIR;
+ cbdr->cir = hw->reg + ENETC_SICBDRCIR;
+}
+
+static void enetc_clear_cbdr(struct enetc_hw *hw)
+{
+ enetc_wr(hw, ENETC_SICBDRMR, 0);
+}
+
+static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
+{
+ int *rss_table;
+ int i;
+
+ rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
+ if (!rss_table)
+ return -ENOMEM;
+
+ /* Set up RSS table defaults */
+ for (i = 0; i < si->num_rss; i++)
+ rss_table[i] = i % num_groups;
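+ /* e.g. with 4 Rx rings the table reads 0,1,2,3,0,1,2,3,... */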
+
+ enetc_set_rss_table(si, rss_table, si->num_rss);
+
+ kfree(rss_table);
+
+ return 0;
+}
+
+static int enetc_configure_si(struct enetc_ndev_priv *priv)
+{
+ struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &si->hw;
+ int err;
+
+ enetc_setup_cbdr(hw, &si->cbd_ring);
+ /* set SI cache attributes */
+ enetc_wr(hw, ENETC_SICAR0,
+ ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+ enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
+ /* enable SI */
+ enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
+
+ if (si->num_rss) {
+ err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
+{
+ struct enetc_si *si = priv->si;
+ int cpus = num_online_cpus();
+
+ priv->tx_bd_count = ENETC_BDR_DEFAULT_SIZE;
+ priv->rx_bd_count = ENETC_BDR_DEFAULT_SIZE;
+
+ /* Enable all available TX rings in order to configure as many
+ * priorities as possible, when needed.
+ * TODO: Make # of TX rings run-time configurable
+ */
+ priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
+ priv->num_tx_rings = si->num_tx_rings;
+ priv->bdr_int_num = cpus;
+
+ /* SI specific */
+ si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
+}
+
+int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
+{
+ struct enetc_si *si = priv->si;
+ int err;
+
+ err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
+ if (err)
+ return err;
+
+ priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
+ GFP_KERNEL);
+ if (!priv->cls_rules) {
+ err = -ENOMEM;
+ goto err_alloc_cls;
+ }
+
+ err = enetc_configure_si(priv);
+ if (err)
+ goto err_config_si;
+
+ return 0;
+
+err_config_si:
+ kfree(priv->cls_rules);
+err_alloc_cls:
+ enetc_clear_cbdr(&si->hw);
+ enetc_free_cbdr(priv->dev, &si->cbd_ring);
+
+ return err;
+}
+
+void enetc_free_si_resources(struct enetc_ndev_priv *priv)
+{
+ struct enetc_si *si = priv->si;
+
+ enetc_clear_cbdr(&si->hw);
+ enetc_free_cbdr(priv->dev, &si->cbd_ring);
+
+ kfree(priv->cls_rules);
+}
+
+static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+ int idx = tx_ring->index;
+ u32 tbmr;
+
+ enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
+ lower_32_bits(tx_ring->bd_dma_base));
+
+ enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
+ upper_32_bits(tx_ring->bd_dma_base));
+
+ WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
+ enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
+ ENETC_RTBLENR_LEN(tx_ring->bd_count));
+
+ /* clearing PI/CI registers for Tx not supported, adjust sw indexes */
+ tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
+ tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
+
+ /* enable Tx ints by setting pkt thr to 1 */
+ enetc_txbdr_wr(hw, idx, ENETC_TBICIR0, ENETC_TBICIR0_ICEN | 0x1);
+
+ tbmr = ENETC_TBMR_EN;
+ if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
+ tbmr |= ENETC_TBMR_VIH;
+
+ /* enable ring */
+ enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
+
+ tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
+ tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
+ tx_ring->idr = hw->reg + ENETC_SITXIDR;
+}
+
+static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+{
+ int idx = rx_ring->index;
+ u32 rbmr;
+
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
+ lower_32_bits(rx_ring->bd_dma_base));
+
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
+ upper_32_bits(rx_ring->bd_dma_base));
+
+ WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
+ enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
+ ENETC_RTBLENR_LEN(rx_ring->bd_count));
+
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
+
+ enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
+
+ /* enable Rx ints by setting pkt thr to 1 */
+ enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1);
+
+ rbmr = ENETC_RBMR_EN;
+ if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ rbmr |= ENETC_RBMR_VTE;
+
+ rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
+ rx_ring->idr = hw->reg + ENETC_SIRXIDR;
+
+ enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
+
+ /* enable ring */
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
+}
+
+static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+}
+
+static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+{
+ int idx = rx_ring->index;
+
+ /* disable EN bit on ring */
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
+}
+
+static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+ int delay = 8, timeout = 100;
+ int idx = tx_ring->index;
+
+ /* disable EN bit on ring */
+ enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
+
+ /* wait for busy to clear */
+ while (delay < timeout &&
+ enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
+ msleep(delay);
+ delay *= 2;
+ }
+
+ if (delay >= timeout)
+ netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
+ idx);
+}
+
+static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+
+ udelay(1);
+}
+
+static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
+{
+ struct pci_dev *pdev = priv->si->pdev;
+ cpumask_t cpu_mask;
+ int i, j, err;
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
+ struct enetc_int_vector *v = priv->int_vector[i];
+ int entry = ENETC_BDR_INT_BASE_IDX + i;
+ struct enetc_hw *hw = &priv->si->hw;
+
+ snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
+ priv->ndev->name, i);
+ err = request_irq(irq, enetc_msix, 0, v->name, v);
+ if (err) {
+ dev_err(priv->dev, "request_irq() failed!\n");
+ goto irq_err;
+ }
+
+ v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
+ v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
+
+ enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
+
+ for (j = 0; j < v->count_tx_rings; j++) {
+ int idx = v->tx_ring[j].index;
+
+ enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
+ }
+ cpumask_clear(&cpu_mask);
+ cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
+ irq_set_affinity_hint(irq, &cpu_mask);
+ }
+
+ return 0;
+
+irq_err:
+ while (i--) {
+ int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
+
+ irq_set_affinity_hint(irq, NULL);
+ free_irq(irq, priv->int_vector[i]);
+ }
+
+ return err;
+}
+
+static void enetc_free_irqs(struct enetc_ndev_priv *priv)
+{
+ struct pci_dev *pdev = priv->si->pdev;
+ int i;
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
+
+ irq_set_affinity_hint(irq, NULL);
+ free_irq(irq, priv->int_vector[i]);
+ }
+}
+
+static void enetc_enable_interrupts(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ /* enable Tx & Rx event indication */
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ enetc_rxbdr_wr(&priv->si->hw, i,
+ ENETC_RBIER, ENETC_RBIER_RXTIE);
+ }
+
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ enetc_txbdr_wr(&priv->si->hw, i,
+ ENETC_TBIER, ENETC_TBIER_TXTIE);
+ }
+}
+
+static void enetc_disable_interrupts(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
+}
+
+static void adjust_link(struct net_device *ndev)
+{
+ struct phy_device *phydev = ndev->phydev;
+
+ phy_print_status(phydev);
+}
+
+static int enetc_phy_connect(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct phy_device *phydev;
+
+ if (!priv->phy_node)
+ return 0; /* phy-less mode */
+
+ phydev = of_phy_connect(ndev, priv->phy_node, &adjust_link,
+ 0, priv->if_mode);
+ if (!phydev) {
+ dev_err(&ndev->dev, "could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
+int enetc_open(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int i, err;
+
+ err = enetc_setup_irqs(priv);
+ if (err)
+ return err;
+
+ err = enetc_phy_connect(ndev);
+ if (err)
+ goto err_phy_connect;
+
+ err = enetc_alloc_tx_resources(priv);
+ if (err)
+ goto err_alloc_tx;
+
+ err = enetc_alloc_rx_resources(priv);
+ if (err)
+ goto err_alloc_rx;
+
+ enetc_setup_bdrs(priv);
+
+ err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
+ if (err)
+ goto err_set_queues;
+
+ for (i = 0; i < priv->bdr_int_num; i++)
+ napi_enable(&priv->int_vector[i]->napi);
+
+ enetc_enable_interrupts(priv);
+
+ if (ndev->phydev)
+ phy_start(ndev->phydev);
+ else
+ netif_carrier_on(ndev);
+
+ netif_tx_start_all_queues(ndev);
+
+ return 0;
+
+err_set_queues:
+ enetc_free_rx_resources(priv);
+err_alloc_rx:
+ enetc_free_tx_resources(priv);
+err_alloc_tx:
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
+err_phy_connect:
+ enetc_free_irqs(priv);
+
+ return err;
+}
+
+int enetc_close(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int i;
+
+ netif_tx_stop_all_queues(ndev);
+
+ if (ndev->phydev) {
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
+ } else {
+ netif_carrier_off(ndev);
+ }
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ napi_synchronize(&priv->int_vector[i]->napi);
+ napi_disable(&priv->int_vector[i]->napi);
+ }
+
+ enetc_disable_interrupts(priv);
+ enetc_clear_bdrs(priv);
+
+ enetc_free_rxtx_rings(priv);
+ enetc_free_rx_resources(priv);
+ enetc_free_tx_resources(priv);
+ enetc_free_irqs(priv);
+
+ return 0;
+}
+
+struct net_device_stats *enetc_get_stats(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ unsigned long packets = 0, bytes = 0;
+ int i;
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ packets += priv->rx_ring[i]->stats.packets;
+ bytes += priv->rx_ring[i]->stats.bytes;
+ }
+
+ stats->rx_packets = packets;
+ stats->rx_bytes = bytes;
+ bytes = 0;
+ packets = 0;
+
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ packets += priv->tx_ring[i]->stats.packets;
+ bytes += priv->tx_ring[i]->stats.bytes;
+ }
+
+ stats->tx_packets = packets;
+ stats->tx_bytes = bytes;
+
+ return stats;
+}
+
+static int enetc_set_rss(struct net_device *ndev, int en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 reg;
+
+ enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
+
+ reg = enetc_rd(hw, ENETC_SIMR);
+ reg &= ~ENETC_SIMR_RSSE;
+ reg |= (en) ? ENETC_SIMR_RSSE : 0;
+ enetc_wr(hw, ENETC_SIMR, reg);
+
+ return 0;
+}
+
+int enetc_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = ndev->features ^ features;
+
+ if (changed & NETIF_F_RXHASH)
+ enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
+
+ return 0;
+}
+
+int enetc_alloc_msix(struct enetc_ndev_priv *priv)
+{
+ struct pci_dev *pdev = priv->si->pdev;
+ int size, v_tx_rings;
+ int i, n, err, nvec;
+
+ nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
+ /* allocate MSIX for both messaging and Rx/Tx interrupts */
+ n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
+
+ if (n < 0)
+ return n;
+
+ if (n != nvec)
+ return -EPERM;
+
+ /* # of tx rings per int vector */
+ v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
+ size = sizeof(struct enetc_int_vector) +
+ sizeof(struct enetc_bdr) * v_tx_rings;
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ struct enetc_int_vector *v;
+ struct enetc_bdr *bdr;
+ int j;
+
+ v = kzalloc(size, GFP_KERNEL);
+ if (!v) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ priv->int_vector[i] = v;
+
+ netif_napi_add(priv->ndev, &v->napi, enetc_poll,
+ NAPI_POLL_WEIGHT);
+ v->count_tx_rings = v_tx_rings;
+
+ for (j = 0; j < v_tx_rings; j++) {
+ int idx;
+
+ /* default tx ring mapping policy: with the max of two
+ * vectors, interleave the rings across them; otherwise
+ * give each vector a contiguous block of rings
+ */
+ if (priv->bdr_int_num == ENETC_MAX_BDR_INT)
+ idx = 2 * j + i; /* 2 CPUs */
+ else
+ idx = j + i * v_tx_rings; /* default */
+
+ __set_bit(idx, &v->tx_rings_map);
+ bdr = &v->tx_ring[j];
+ bdr->index = idx;
+ bdr->ndev = priv->ndev;
+ bdr->dev = priv->dev;
+ bdr->bd_count = priv->tx_bd_count;
+ priv->tx_ring[idx] = bdr;
+ }
+
+ bdr = &v->rx_ring;
+ bdr->index = i;
+ bdr->ndev = priv->ndev;
+ bdr->dev = priv->dev;
+ bdr->bd_count = priv->rx_bd_count;
+ priv->rx_ring[i] = bdr;
+ }
+
+ return 0;
+
+fail:
+ while (i--) {
+ netif_napi_del(&priv->int_vector[i]->napi);
+ kfree(priv->int_vector[i]);
+ }
+
+ pci_free_irq_vectors(pdev);
+
+ return err;
+}
+
+void enetc_free_msix(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ struct enetc_int_vector *v = priv->int_vector[i];
+
+ netif_napi_del(&v->napi);
+ }
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ priv->rx_ring[i] = NULL;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ priv->tx_ring[i] = NULL;
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ kfree(priv->int_vector[i]);
+ priv->int_vector[i] = NULL;
+ }
+
+ /* disable all MSIX for this device */
+ pci_free_irq_vectors(priv->si->pdev);
+}
+
+static void enetc_kfree_si(struct enetc_si *si)
+{
+ char *p = (char *)si - si->pad;
+
+ kfree(p);
+}
+
+static void enetc_detect_errata(struct enetc_si *si)
+{
+ if (si->pdev->revision == ENETC_REV1)
+ si->errata = ENETC_ERR_TXCSUM | ENETC_ERR_VLAN_ISOL |
+ ENETC_ERR_UCMCSWP;
+}
+
+int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
+{
+ struct enetc_si *si, *p;
+ struct enetc_hw *hw;
+ size_t alloc_size;
+ int err, len;
+
+ pcie_flr(pdev);
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "device enable failed\n");
+ return err;
+ }
+
+ /* set up for high or low dma */
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
+ }
+ }
+
+ err = pci_request_mem_regions(pdev, name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
+ goto err_pci_mem_reg;
+ }
+
+ pci_set_master(pdev);
+
+ alloc_size = sizeof(struct enetc_si);
+ if (sizeof_priv) {
+ /* align priv to 32B */
+ alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
+ alloc_size += sizeof_priv;
+ }
+ /* force 32B alignment for enetc_si */
+ alloc_size += ENETC_SI_ALIGN - 1;
+
+ p = kzalloc(alloc_size, GFP_KERNEL);
+ if (!p) {
+ err = -ENOMEM;
+ goto err_alloc_si;
+ }
+
+ si = PTR_ALIGN(p, ENETC_SI_ALIGN);
+ si->pad = (char *)si - (char *)p;
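+ /* si->pad records the offset back to the kzalloc'ed base so
+ * enetc_kfree_si() can recover the original pointer for kfree()
+ */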
+
+ pci_set_drvdata(pdev, si);
+ si->pdev = pdev;
+ hw = &si->hw;
+
+ len = pci_resource_len(pdev, ENETC_BAR_REGS);
+ hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
+ if (!hw->reg) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "ioremap() failed\n");
+ goto err_ioremap;
+ }
+ if (len > ENETC_PORT_BASE)
+ hw->port = hw->reg + ENETC_PORT_BASE;
+ if (len > ENETC_GLOBAL_BASE)
+ hw->global = hw->reg + ENETC_GLOBAL_BASE;
+
+ enetc_detect_errata(si);
+
+ return 0;
+
+err_ioremap:
+ enetc_kfree_si(si);
+err_alloc_si:
+ pci_release_mem_regions(pdev);
+err_pci_mem_reg:
+err_dma:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+void enetc_pci_remove(struct pci_dev *pdev)
+{
+ struct enetc_si *si = pci_get_drvdata(pdev);
+ struct enetc_hw *hw = &si->hw;
+
+ iounmap(hw->reg);
+ enetc_kfree_si(si);
+ pci_release_mem_regions(pdev);
+ pci_disable_device(pdev);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
new file mode 100644
index 000000000000..b274135c5103
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2017-2019 NXP */
+
+#include <linux/timer.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/phy.h>
+
+#include "enetc_hw.h"
+
+#define ENETC_MAC_MAXFRM_SIZE 9600
+#define ENETC_MAX_MTU (ENETC_MAC_MAXFRM_SIZE - \
+ (ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN))
+
+struct enetc_tx_swbd {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ u16 len;
+ u16 is_dma_page;
+};
+
+#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
+#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */
+#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
+#define ENETC_RXB_DMA_SIZE \
+ (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
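+/* usable DMA size: the half-page truesize minus the headroom pad and
+ * the skb_shared_info overhead reserved at the end
+ */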
+
+struct enetc_rx_swbd {
+ dma_addr_t dma;
+ struct page *page;
+ u16 page_offset;
+};
+
+struct enetc_ring_stats {
+ unsigned int packets;
+ unsigned int bytes;
+ unsigned int rx_alloc_errs;
+};
+
+#define ENETC_BDR_DEFAULT_SIZE 1024
+#define ENETC_DEFAULT_TX_WORK 256
+
+struct enetc_bdr {
+ struct device *dev; /* for DMA mapping */
+ struct net_device *ndev;
+ void *bd_base; /* points to Rx or Tx BD ring */
+ union {
+ void __iomem *tpir;
+ void __iomem *rcir;
+ };
+ u16 index;
+ int bd_count; /* # of BDs */
+ int next_to_use;
+ int next_to_clean;
+ union {
+ struct enetc_tx_swbd *tx_swbd;
+ struct enetc_rx_swbd *rx_swbd;
+ };
+ union {
+ void __iomem *tcir; /* Tx */
+ int next_to_alloc; /* Rx */
+ };
+ void __iomem *idr; /* Interrupt Detect Register pointer */
+
+ struct enetc_ring_stats stats;
+
+ dma_addr_t bd_dma_base;
+} ____cacheline_aligned_in_smp;
+
+static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
+{
+ if (unlikely(++*i == bdr->bd_count))
+ *i = 0;
+}
+
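+/* free BDs in the ring; one slot is always left unused so that
+ * next_to_clean == next_to_use unambiguously means "empty"
+ */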
+static inline int enetc_bd_unused(struct enetc_bdr *bdr)
+{
+ if (bdr->next_to_clean > bdr->next_to_use)
+ return bdr->next_to_clean - bdr->next_to_use - 1;
+
+ return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1;
+}
+
+/* Control BD ring */
+#define ENETC_CBDR_DEFAULT_SIZE 64
+struct enetc_cbdr {
+ void *bd_base; /* points to Rx or Tx BD ring */
+ void __iomem *pir;
+ void __iomem *cir;
+
+ int bd_count; /* # of BDs */
+ int next_to_use;
+ int next_to_clean;
+
+ dma_addr_t bd_dma_base;
+};
+
+#define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i]))
+#define ENETC_RXBD(BDR, i) (&(((union enetc_rx_bd *)((BDR).bd_base))[i]))
+
+struct enetc_msg_swbd {
+ void *vaddr;
+ dma_addr_t dma;
+ int size;
+};
+
+#define ENETC_REV1 0x1
+enum enetc_errata {
+ ENETC_ERR_TXCSUM = BIT(0),
+ ENETC_ERR_VLAN_ISOL = BIT(1),
+ ENETC_ERR_UCMCSWP = BIT(2),
+};
+
+/* PCI IEP device data */
+struct enetc_si {
+ struct pci_dev *pdev;
+ struct enetc_hw hw;
+ enum enetc_errata errata;
+
+ struct net_device *ndev; /* back ref. */
+
+ struct enetc_cbdr cbd_ring;
+
+ int num_rx_rings; /* how many rings are available in the SI */
+ int num_tx_rings;
+ int num_fs_entries;
+ int num_rss; /* number of RSS buckets */
+ unsigned short pad;
+};
+
+#define ENETC_SI_ALIGN 32
+
+static inline void *enetc_si_priv(const struct enetc_si *si)
+{
+ return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN);
+}
+
+static inline bool enetc_si_is_pf(struct enetc_si *si)
+{
+ return !!(si->hw.port);
+}
+
+#define ENETC_MAX_NUM_TXQS 8
+#define ENETC_INT_NAME_MAX (IFNAMSIZ + 8)
+
+struct enetc_int_vector {
+ void __iomem *rbier;
+ void __iomem *tbier_base;
+ unsigned long tx_rings_map;
+ int count_tx_rings;
+ struct napi_struct napi;
+ char name[ENETC_INT_NAME_MAX];
+
+ struct enetc_bdr rx_ring ____cacheline_aligned_in_smp;
+ struct enetc_bdr tx_ring[0];
+};
+
+struct enetc_cls_rule {
+ struct ethtool_rx_flow_spec fs;
+ int used;
+};
+
+#define ENETC_MAX_BDR_INT 2 /* fixed to max # of available cpus */
+
+struct enetc_ndev_priv {
+ struct net_device *ndev;
+ struct device *dev; /* dma-mapping device */
+ struct enetc_si *si;
+
+ int bdr_int_num; /* number of Rx/Tx ring interrupts */
+ struct enetc_int_vector *int_vector[ENETC_MAX_BDR_INT];
+ u16 num_rx_rings, num_tx_rings;
+ u16 rx_bd_count, tx_bd_count;
+
+ u16 msg_enable;
+
+ struct enetc_bdr *tx_ring[16];
+ struct enetc_bdr *rx_ring[16];
+
+ struct enetc_cls_rule *cls_rules;
+
+ struct device_node *phy_node;
+ phy_interface_t if_mode;
+};
+
+/* Messaging */
+
+/* VF-PF set primary MAC address message format */
+struct enetc_msg_cmd_set_primary_mac {
+ struct enetc_msg_cmd_header header;
+ struct sockaddr mac;
+};
+
+#define ENETC_CBD(R, i) (&(((struct enetc_cbd *)((R).bd_base))[i]))
+
+#define ENETC_CBDR_TIMEOUT 1000 /* usecs */
+
+/* SI common */
+int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv);
+void enetc_pci_remove(struct pci_dev *pdev);
+int enetc_alloc_msix(struct enetc_ndev_priv *priv);
+void enetc_free_msix(struct enetc_ndev_priv *priv);
+void enetc_get_si_caps(struct enetc_si *si);
+void enetc_init_si_rings_params(struct enetc_ndev_priv *priv);
+int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
+void enetc_free_si_resources(struct enetc_ndev_priv *priv);
+
+int enetc_open(struct net_device *ndev);
+int enetc_close(struct net_device *ndev);
+netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
+struct net_device_stats *enetc_get_stats(struct net_device *ndev);
+int enetc_set_features(struct net_device *ndev,
+ netdev_features_t features);
+/* ethtool */
+void enetc_set_ethtool_ops(struct net_device *ndev);
+
+/* control buffer descriptor ring (CBDR) */
+int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
+ char *mac_addr, int si_map);
+int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
+int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
+ int index);
+void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes);
+int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
+int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
new file mode 100644
index 000000000000..de466b71bf8f
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include "enetc.h"
+
+static void enetc_clean_cbdr(struct enetc_si *si)
+{
+ struct enetc_cbdr *ring = &si->cbd_ring;
+ struct enetc_cbd *dest_cbd;
+ int i, status;
+
+ i = ring->next_to_clean;
+
+ while (enetc_rd_reg(ring->cir) != i) {
+ dest_cbd = ENETC_CBD(*ring, i);
+ status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK;
+ if (status)
+ dev_warn(&si->pdev->dev, "CMD err %04x for cmd %04x\n",
+ status, dest_cbd->cmd);
+
+ memset(dest_cbd, 0, sizeof(*dest_cbd));
+
+ i = (i + 1) % ring->bd_count;
+ }
+
+ ring->next_to_clean = i;
+}
+
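+/* free slots in the control BD ring, with one slot kept in reserve so
+ * a full ring can be told apart from an empty one
+ */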
+static int enetc_cbd_unused(struct enetc_cbdr *r)
+{
+ return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) %
+ r->bd_count;
+}
+
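+/* post one command BD and busy-wait (up to ENETC_CBDR_TIMEOUT us) for
+ * h/w to consume it; called under rtnl_lock(), so we cannot sleep
+ */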
+static int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
+{
+ struct enetc_cbdr *ring = &si->cbd_ring;
+ int timeout = ENETC_CBDR_TIMEOUT;
+ struct enetc_cbd *dest_cbd;
+ int i;
+
+ if (unlikely(!ring->bd_base))
+ return -EIO;
+
+ if (unlikely(!enetc_cbd_unused(ring)))
+ enetc_clean_cbdr(si);
+
+ i = ring->next_to_use;
+ dest_cbd = ENETC_CBD(*ring, i);
+
+ /* copy command to the ring */
+ *dest_cbd = *cbd;
+ i = (i + 1) % ring->bd_count;
+
+ ring->next_to_use = i;
+ /* let H/W know BD ring has been updated */
+ enetc_wr_reg(ring->pir, i);
+
+ do {
+ if (enetc_rd_reg(ring->cir) == i)
+ break;
+ udelay(10); /* cannot sleep, rtnl_lock() */
+ timeout -= 10;
+ } while (timeout);
+
+ if (!timeout)
+ return -EBUSY;
+
+ enetc_clean_cbdr(si);
+
+ return 0;
+}
+
+int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
+{
+ struct enetc_cbd cbd;
+
+ memset(&cbd, 0, sizeof(cbd));
+
+ cbd.cls = 1;
+ cbd.status_flags = ENETC_CBD_FLAGS_SF;
+ cbd.index = cpu_to_le16(index);
+
+ return enetc_send_cmd(si, &cbd);
+}
+
+int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
+ char *mac_addr, int si_map)
+{
+ struct enetc_cbd cbd;
+ u32 upper;
+ u16 lower;
+
+ memset(&cbd, 0, sizeof(cbd));
+
+ /* fill up the "set" descriptor */
+ cbd.cls = 1;
+ cbd.status_flags = ENETC_CBD_FLAGS_SF;
+ cbd.index = cpu_to_le16(index);
+ cbd.opt[3] = cpu_to_le32(si_map);
+ /* enable entry */
+ cbd.opt[0] = cpu_to_le32(BIT(31));
+
+ upper = *(const u32 *)mac_addr;
+ lower = *(const u16 *)(mac_addr + 4);
+ cbd.addr[0] = cpu_to_le32(upper);
+ cbd.addr[1] = cpu_to_le32(lower);
+
+ return enetc_send_cmd(si, &cbd);
+}
+
+#define RFSE_ALIGN 64
+/* Set entry in RFS table */
+int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
+ int index)
+{
+ struct enetc_cbd cbd = {.cmd = 0};
+ dma_addr_t dma, dma_align;
+ void *tmp, *tmp_align;
+ int err;
+
+ /* fill up the "set" descriptor */
+ cbd.cmd = 0;
+ cbd.cls = 4;
+ cbd.index = cpu_to_le16(index);
+ cbd.length = cpu_to_le16(sizeof(*rfse));
+ cbd.opt[3] = cpu_to_le32(0); /* SI */
+
+ tmp = dma_alloc_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
+ &dma, GFP_KERNEL);
+ if (!tmp) {
+ dev_err(&si->pdev->dev, "DMA mapping of RFS entry failed!\n");
+ return -ENOMEM;
+ }
+
+ dma_align = ALIGN(dma, RFSE_ALIGN);
+ tmp_align = PTR_ALIGN(tmp, RFSE_ALIGN);
+ memcpy(tmp_align, rfse, sizeof(*rfse));
+
+ cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
+ cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
+
+ err = enetc_send_cmd(si, &cbd);
+ if (err)
+ dev_err(&si->pdev->dev, "FS entry add failed (%d)!", err);
+
+ dma_free_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
+ tmp, dma);
+
+ return err;
+}
+
+#define RSSE_ALIGN 64
+static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
+ bool read)
+{
+ struct enetc_cbd cbd = {.cmd = 0};
+ dma_addr_t dma, dma_align;
+ u8 *tmp, *tmp_align;
+ int err, i;
+
+ if (count < RSSE_ALIGN)
+ /* HW only takes in a full 64 entry table */
+ return -EINVAL;
+
+ tmp = dma_alloc_coherent(&si->pdev->dev, count + RSSE_ALIGN,
+ &dma, GFP_KERNEL);
+ if (!tmp) {
+ dev_err(&si->pdev->dev, "DMA mapping of RSS table failed!\n");
+ return -ENOMEM;
+ }
+ dma_align = ALIGN(dma, RSSE_ALIGN);
+ tmp_align = PTR_ALIGN(tmp, RSSE_ALIGN);
+
+ if (!read)
+ for (i = 0; i < count; i++)
+ tmp_align[i] = (u8)(table[i]);
+
+ /* fill up the descriptor */
+ cbd.cmd = read ? 2 : 1;
+ cbd.cls = 3;
+ cbd.length = cpu_to_le16(count);
+
+ cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
+ cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
+
+ err = enetc_send_cmd(si, &cbd);
+ if (err)
+ dev_err(&si->pdev->dev, "RSS cmd failed (%d)!", err);
+
+ if (read)
+ for (i = 0; i < count; i++)
+ table[i] = tmp_align[i];
+
+ dma_free_coherent(&si->pdev->dev, count + RSSE_ALIGN, tmp, dma);
+
+ return err;
+}
+
+/* Get RSS table */
+int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
+{
+ return enetc_cmd_rss_table(si, table, count, true);
+}
+
+/* Set RSS table */
+int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
+{
+ return enetc_cmd_rss_table(si, (u32 *)table, count, false);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
new file mode 100644
index 000000000000..1ecad9ffabae
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include <linux/net_tstamp.h>
+#include <linux/module.h>
+#include "enetc.h"
+
+static const u32 enetc_si_regs[] = {
+ ENETC_SIMR, ENETC_SIPMAR0, ENETC_SIPMAR1, ENETC_SICBDRMR,
+ ENETC_SICBDRSR, ENETC_SICBDRBAR0, ENETC_SICBDRBAR1, ENETC_SICBDRPIR,
+ ENETC_SICBDRCIR, ENETC_SICBDRLENR, ENETC_SICAPR0, ENETC_SICAPR1,
+ ENETC_SIUEFDCR
+};
+
+static const u32 enetc_txbdr_regs[] = {
+ ENETC_TBMR, ENETC_TBSR, ENETC_TBBAR0, ENETC_TBBAR1,
+ ENETC_TBPIR, ENETC_TBCIR, ENETC_TBLENR, ENETC_TBIER
+};
+
+static const u32 enetc_rxbdr_regs[] = {
+ ENETC_RBMR, ENETC_RBSR, ENETC_RBBSR, ENETC_RBCIR, ENETC_RBBAR0,
+ ENETC_RBBAR1, ENETC_RBPIR, ENETC_RBLENR, ENETC_RBICIR0, ENETC_RBIER
+};
+
+static const u32 enetc_port_regs[] = {
+ ENETC_PMR, ENETC_PSR, ENETC_PSIPMR, ENETC_PSIPMAR0(0),
+ ENETC_PSIPMAR1(0), ENETC_PTXMBAR, ENETC_PCAPR0, ENETC_PCAPR1,
+ ENETC_PSICFGR0(0), ENETC_PRFSCAPR, ENETC_PTCMSDUR(0),
+ ENETC_PM0_CMD_CFG, ENETC_PM0_MAXFRM, ENETC_PM0_IF_MODE
+};
+
+static int enetc_get_reglen(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ int len;
+
+ len = ARRAY_SIZE(enetc_si_regs);
+ len += ARRAY_SIZE(enetc_txbdr_regs) * priv->num_tx_rings;
+ len += ARRAY_SIZE(enetc_rxbdr_regs) * priv->num_rx_rings;
+
+ if (hw->port)
+ len += ARRAY_SIZE(enetc_port_regs);
+
+ len *= sizeof(u32) * 2; /* store 2 entries per reg: addr and value */
+
+ return len;
+}
+
+static void enetc_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
+ void *regbuf)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 *buf = (u32 *)regbuf;
+ int i, j;
+ u32 addr;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_si_regs); i++) {
+ *buf++ = enetc_si_regs[i];
+ *buf++ = enetc_rd(hw, enetc_si_regs[i]);
+ }
+
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ for (j = 0; j < ARRAY_SIZE(enetc_txbdr_regs); j++) {
+ addr = ENETC_BDR(TX, i, enetc_txbdr_regs[j]);
+
+ *buf++ = addr;
+ *buf++ = enetc_rd(hw, addr);
+ }
+ }
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ for (j = 0; j < ARRAY_SIZE(enetc_rxbdr_regs); j++) {
+ addr = ENETC_BDR(RX, i, enetc_rxbdr_regs[j]);
+
+ *buf++ = addr;
+ *buf++ = enetc_rd(hw, addr);
+ }
+ }
+
+ if (!hw->port)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_port_regs); i++) {
+ addr = ENETC_PORT_BASE + enetc_port_regs[i];
+ *buf++ = addr;
+ *buf++ = enetc_rd(hw, addr);
+ }
+}
+
+static const struct {
+ int reg;
+ char name[ETH_GSTRING_LEN];
+} enetc_si_counters[] = {
+ { ENETC_SIROCT, "SI rx octets" },
+ { ENETC_SIRFRM, "SI rx frames" },
+ { ENETC_SIRUCA, "SI rx u-cast frames" },
+ { ENETC_SIRMCA, "SI rx m-cast frames" },
+ { ENETC_SITOCT, "SI tx octets" },
+ { ENETC_SITFRM, "SI tx frames" },
+ { ENETC_SITUCA, "SI tx u-cast frames" },
+ { ENETC_SITMCA, "SI tx m-cast frames" },
+ { ENETC_RBDCR(0), "Rx ring 0 discarded frames" },
+ { ENETC_RBDCR(1), "Rx ring 1 discarded frames" },
+ { ENETC_RBDCR(2), "Rx ring 2 discarded frames" },
+ { ENETC_RBDCR(3), "Rx ring 3 discarded frames" },
+ { ENETC_RBDCR(4), "Rx ring 4 discarded frames" },
+ { ENETC_RBDCR(5), "Rx ring 5 discarded frames" },
+ { ENETC_RBDCR(6), "Rx ring 6 discarded frames" },
+ { ENETC_RBDCR(7), "Rx ring 7 discarded frames" },
+ { ENETC_RBDCR(8), "Rx ring 8 discarded frames" },
+ { ENETC_RBDCR(9), "Rx ring 9 discarded frames" },
+ { ENETC_RBDCR(10), "Rx ring 10 discarded frames" },
+ { ENETC_RBDCR(11), "Rx ring 11 discarded frames" },
+ { ENETC_RBDCR(12), "Rx ring 12 discarded frames" },
+ { ENETC_RBDCR(13), "Rx ring 13 discarded frames" },
+ { ENETC_RBDCR(14), "Rx ring 14 discarded frames" },
+ { ENETC_RBDCR(15), "Rx ring 15 discarded frames" },
+};
+
+static const struct {
+ int reg;
+ char name[ETH_GSTRING_LEN];
+} enetc_port_counters[] = {
+ { ENETC_PM0_REOCT, "MAC rx ethernet octets" },
+ { ENETC_PM0_RALN, "MAC rx alignment errors" },
+ { ENETC_PM0_RXPF, "MAC rx valid pause frames" },
+ { ENETC_PM0_RFRM, "MAC rx valid frames" },
+ { ENETC_PM0_RFCS, "MAC rx fcs errors" },
+ { ENETC_PM0_RVLAN, "MAC rx VLAN frames" },
+ { ENETC_PM0_RERR, "MAC rx frame errors" },
+ { ENETC_PM0_RUCA, "MAC rx unicast frames" },
+ { ENETC_PM0_RMCA, "MAC rx multicast frames" },
+ { ENETC_PM0_RBCA, "MAC rx broadcast frames" },
+ { ENETC_PM0_RDRP, "MAC rx dropped packets" },
+ { ENETC_PM0_RPKT, "MAC rx packets" },
+ { ENETC_PM0_RUND, "MAC rx undersized packets" },
+ { ENETC_PM0_R64, "MAC rx 64 byte packets" },
+ { ENETC_PM0_R127, "MAC rx 65-127 byte packets" },
+ { ENETC_PM0_R255, "MAC rx 128-255 byte packets" },
+ { ENETC_PM0_R511, "MAC rx 256-511 byte packets" },
+ { ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" },
+ { ENETC_PM0_R1518, "MAC rx 1024-1518 byte packets" },
+ { ENETC_PM0_R1519X, "MAC rx 1519 to max-octet packets" },
+ { ENETC_PM0_ROVR, "MAC rx oversized packets" },
+ { ENETC_PM0_RJBR, "MAC rx jabber packets" },
+ { ENETC_PM0_RFRG, "MAC rx fragment packets" },
+ { ENETC_PM0_RCNP, "MAC rx control packets" },
+ { ENETC_PM0_RDRNTP, "MAC rx fifo drop" },
+ { ENETC_PM0_TEOCT, "MAC tx ethernet octets" },
+ { ENETC_PM0_TOCT, "MAC tx octets" },
+ { ENETC_PM0_TCRSE, "MAC tx carrier sense errors" },
+ { ENETC_PM0_TXPF, "MAC tx valid pause frames" },
+ { ENETC_PM0_TFRM, "MAC tx frames" },
+ { ENETC_PM0_TFCS, "MAC tx fcs errors" },
+ { ENETC_PM0_TVLAN, "MAC tx VLAN frames" },
+ { ENETC_PM0_TERR, "MAC tx frame errors" },
+ { ENETC_PM0_TUCA, "MAC tx unicast frames" },
+ { ENETC_PM0_TMCA, "MAC tx multicast frames" },
+ { ENETC_PM0_TBCA, "MAC tx broadcast frames" },
+ { ENETC_PM0_TPKT, "MAC tx packets" },
+ { ENETC_PM0_TUND, "MAC tx undersized packets" },
+ { ENETC_PM0_T127, "MAC tx 65-127 byte packets" },
+ { ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" },
+ { ENETC_PM0_T1518, "MAC tx 1024-1518 byte packets" },
+ { ENETC_PM0_TCNP, "MAC tx control packets" },
+ { ENETC_PM0_TDFR, "MAC tx deferred packets" },
+ { ENETC_PM0_TMCOL, "MAC tx multiple collisions" },
+ { ENETC_PM0_TSCOL, "MAC tx single collisions" },
+ { ENETC_PM0_TLCOL, "MAC tx late collisions" },
+ { ENETC_PM0_TECOL, "MAC tx excessive collisions" },
+ { ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
+ { ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
+ { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
+ { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" },
+ { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" },
+ { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" },
+ { ENETC_PFDMSAPR, "SI pruning discarded frames" },
+ { ENETC_PICDR(0), "ICM DR0 discarded frames" },
+ { ENETC_PICDR(1), "ICM DR1 discarded frames" },
+ { ENETC_PICDR(2), "ICM DR2 discarded frames" },
+ { ENETC_PICDR(3), "ICM DR3 discarded frames" },
+};
+
+static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
+ "Rx ring %2d frames",
+ "Rx ring %2d alloc errors",
+};
+
+static const char tx_ring_stats[][ETH_GSTRING_LEN] = {
+ "Tx ring %2d frames",
+};
+
+static int enetc_get_sset_count(struct net_device *ndev, int sset)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ if (sset == ETH_SS_STATS)
+ return ARRAY_SIZE(enetc_si_counters) +
+ ARRAY_SIZE(tx_ring_stats) * priv->num_tx_rings +
+ ARRAY_SIZE(rx_ring_stats) * priv->num_rx_rings +
+ (enetc_si_is_pf(priv->si) ?
+ ARRAY_SIZE(enetc_port_counters) : 0);
+
+ return -EOPNOTSUPP;
+}
+
+static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ u8 *p = data;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) {
+ strlcpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ for (j = 0; j < ARRAY_SIZE(tx_ring_stats); j++) {
+ snprintf(p, ETH_GSTRING_LEN, tx_ring_stats[j],
+ i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ for (j = 0; j < ARRAY_SIZE(rx_ring_stats); j++) {
+ snprintf(p, ETH_GSTRING_LEN, rx_ring_stats[j],
+ i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+
+ if (!enetc_si_is_pf(priv->si))
+ break;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) {
+ strlcpy(p, enetc_port_counters[i].name,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void enetc_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ int i, o = 0;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++)
+ data[o++] = enetc_rd64(hw, enetc_si_counters[i].reg);
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ data[o++] = priv->tx_ring[i]->stats.packets;
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ data[o++] = priv->rx_ring[i]->stats.packets;
+ data[o++] = priv->rx_ring[i]->stats.rx_alloc_errs;
+ }
+
+ if (!enetc_si_is_pf(priv->si))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
+ data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
+}
+
+#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
+ RXH_IP_DST)
+#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3)
+static int enetc_get_rsshash(struct ethtool_rxnfc *rxnfc)
+{
+ static const u32 rsshash[] = {
+ [TCP_V4_FLOW] = ENETC_RSSHASH_L4,
+ [UDP_V4_FLOW] = ENETC_RSSHASH_L4,
+ [SCTP_V4_FLOW] = ENETC_RSSHASH_L4,
+ [AH_ESP_V4_FLOW] = ENETC_RSSHASH_L3,
+ [IPV4_FLOW] = ENETC_RSSHASH_L3,
+ [TCP_V6_FLOW] = ENETC_RSSHASH_L4,
+ [UDP_V6_FLOW] = ENETC_RSSHASH_L4,
+ [SCTP_V6_FLOW] = ENETC_RSSHASH_L4,
+ [AH_ESP_V6_FLOW] = ENETC_RSSHASH_L3,
+ [IPV6_FLOW] = ENETC_RSSHASH_L3,
+ [ETHER_FLOW] = 0,
+ };
+
+ if (rxnfc->flow_type >= ARRAY_SIZE(rsshash))
+ return -EINVAL;
+
+ rxnfc->data = rsshash[rxnfc->flow_type];
+
+ return 0;
+}
+
+/* current HW spec does byte reversal on everything including MAC addresses */
+static void ether_addr_copy_swap(u8 *dst, const u8 *src)
+{
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dst[i] = src[ETH_ALEN - i - 1];
+}
+
+static int enetc_set_cls_entry(struct enetc_si *si,
+ struct ethtool_rx_flow_spec *fs, bool en)
+{
+ struct ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m;
+ struct ethtool_usrip4_spec *l3ip4_h, *l3ip4_m;
+ struct ethhdr *eth_h, *eth_m;
+ struct enetc_cmd_rfse rfse = { {0} };
+
+ if (!en)
+ goto done;
+
+ switch (fs->flow_type & 0xff) {
+ case TCP_V4_FLOW:
+ l4ip4_h = &fs->h_u.tcp_ip4_spec;
+ l4ip4_m = &fs->m_u.tcp_ip4_spec;
+ goto l4ip4;
+ case UDP_V4_FLOW:
+ l4ip4_h = &fs->h_u.udp_ip4_spec;
+ l4ip4_m = &fs->m_u.udp_ip4_spec;
+ goto l4ip4;
+ case SCTP_V4_FLOW:
+ l4ip4_h = &fs->h_u.sctp_ip4_spec;
+ l4ip4_m = &fs->m_u.sctp_ip4_spec;
+l4ip4:
+ rfse.sip_h[0] = l4ip4_h->ip4src;
+ rfse.sip_m[0] = l4ip4_m->ip4src;
+ rfse.dip_h[0] = l4ip4_h->ip4dst;
+ rfse.dip_m[0] = l4ip4_m->ip4dst;
+ rfse.sport_h = ntohs(l4ip4_h->psrc);
+ rfse.sport_m = ntohs(l4ip4_m->psrc);
+ rfse.dport_h = ntohs(l4ip4_h->pdst);
+ rfse.dport_m = ntohs(l4ip4_m->pdst);
+ if (l4ip4_m->tos)
+ netdev_warn(si->ndev, "ToS field is not supported and was ignored\n");
+ rfse.ethtype_h = ETH_P_IP; /* IPv4 */
+ rfse.ethtype_m = 0xffff;
+ break;
+ case IP_USER_FLOW:
+ l3ip4_h = &fs->h_u.usr_ip4_spec;
+ l3ip4_m = &fs->m_u.usr_ip4_spec;
+
+ rfse.sip_h[0] = l3ip4_h->ip4src;
+ rfse.sip_m[0] = l3ip4_m->ip4src;
+ rfse.dip_h[0] = l3ip4_h->ip4dst;
+ rfse.dip_m[0] = l3ip4_m->ip4dst;
+ if (l3ip4_m->tos)
+ netdev_warn(si->ndev, "ToS field is not supported and was ignored\n");
+ rfse.ethtype_h = ETH_P_IP; /* IPv4 */
+ rfse.ethtype_m = 0xffff;
+ break;
+ case ETHER_FLOW:
+ eth_h = &fs->h_u.ether_spec;
+ eth_m = &fs->m_u.ether_spec;
+
+ ether_addr_copy_swap(rfse.smac_h, eth_h->h_source);
+ ether_addr_copy_swap(rfse.smac_m, eth_m->h_source);
+ ether_addr_copy_swap(rfse.dmac_h, eth_h->h_dest);
+ ether_addr_copy_swap(rfse.dmac_m, eth_m->h_dest);
+ rfse.ethtype_h = ntohs(eth_h->h_proto);
+ rfse.ethtype_m = ntohs(eth_m->h_proto);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ rfse.mode |= ENETC_RFSE_EN;
+ if (fs->ring_cookie != RX_CLS_FLOW_DISC) {
+ rfse.mode |= ENETC_RFSE_MODE_BD;
+ rfse.result = fs->ring_cookie;
+ }
+done:
+ return enetc_set_fs_entry(si, &rfse, fs->location);
+}
+
+static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
+ u32 *rule_locs)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int i, j;
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = priv->num_rx_rings;
+ break;
+ case ETHTOOL_GRXFH:
+ /* get RSS hash config */
+ return enetc_get_rsshash(rxnfc);
+ case ETHTOOL_GRXCLSRLCNT:
+ /* total number of entries */
+ rxnfc->data = priv->si->num_fs_entries;
+ /* number of entries in use */
+ rxnfc->rule_cnt = 0;
+ for (i = 0; i < priv->si->num_fs_entries; i++)
+ if (priv->cls_rules[i].used)
+ rxnfc->rule_cnt++;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ if (rxnfc->fs.location >= priv->si->num_fs_entries)
+ return -EINVAL;
+
+ /* get entry x */
+ rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ /* total number of entries */
+ rxnfc->data = priv->si->num_fs_entries;
+ /* array of indexes of used entries */
+ j = 0;
+ for (i = 0; i < priv->si->num_fs_entries; i++) {
+ if (!priv->cls_rules[i].used)
+ continue;
+ if (j == rxnfc->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[j++] = i;
+ }
+ /* number of entries in use */
+ rxnfc->rule_cnt = j;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int enetc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ if (rxnfc->fs.location >= priv->si->num_fs_entries)
+ return -EINVAL;
+
+ if (rxnfc->fs.ring_cookie >= priv->num_rx_rings &&
+ rxnfc->fs.ring_cookie != RX_CLS_FLOW_DISC)
+ return -EINVAL;
+
+ err = enetc_set_cls_entry(priv->si, &rxnfc->fs, true);
+ if (err)
+ return err;
+ priv->cls_rules[rxnfc->fs.location].fs = rxnfc->fs;
+ priv->cls_rules[rxnfc->fs.location].used = 1;
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ if (rxnfc->fs.location >= priv->si->num_fs_entries)
+ return -EINVAL;
+
+ err = enetc_set_cls_entry(priv->si, &rxnfc->fs, false);
+ if (err)
+ return err;
+ priv->cls_rules[rxnfc->fs.location].used = 0;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static u32 enetc_get_rxfh_key_size(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ /* return the size of the RX flow hash key. PF only */
+ return (priv->si->hw.port) ? ENETC_RSSHASH_KEY_SIZE : 0;
+}
+
+static u32 enetc_get_rxfh_indir_size(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ /* return the size of the RX flow hash indirection table */
+ return priv->si->num_rss;
+}
+
+static int enetc_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ int err = 0, i;
+
+ /* return hash function */
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ /* return hash key */
+ if (key && hw->port)
+ for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
+ ((u32 *)key)[i] = enetc_port_rd(hw, ENETC_PRSSK(i));
+
+ /* return RSS table */
+ if (indir)
+ err = enetc_get_rss_table(priv->si, indir, priv->si->num_rss);
+
+ return err;
+}
+
+void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
+{
+ int i;
+
+ for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
+ enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]);
+}
+
+static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ int err = 0;
+
+ /* set hash key, if PF */
+ if (key && hw->port)
+ enetc_set_rss_key(hw, key);
+
+ /* set RSS table */
+ if (indir)
+ err = enetc_set_rss_table(priv->si, indir, priv->si->num_rss);
+
+ return err;
+}
+
+static void enetc_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ ring->rx_pending = priv->rx_bd_count;
+ ring->tx_pending = priv->tx_bd_count;
+
+ /* do some h/w sanity checks for BDR length */
+ if (netif_running(ndev)) {
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 val = enetc_rxbdr_rd(hw, 0, ENETC_RBLENR);
+
+ if (val != priv->rx_bd_count)
+ netif_err(priv, hw, ndev, "RxBDR[RBLENR] = %d!\n", val);
+
+ val = enetc_txbdr_rd(hw, 0, ENETC_TBLENR);
+
+ if (val != priv->tx_bd_count)
+ netif_err(priv, hw, ndev, "TxBDR[TBLENR] = %d!\n", val);
+ }
+}
+
+static const struct ethtool_ops enetc_pf_ethtool_ops = {
+ .get_regs_len = enetc_get_reglen,
+ .get_regs = enetc_get_regs,
+ .get_sset_count = enetc_get_sset_count,
+ .get_strings = enetc_get_strings,
+ .get_ethtool_stats = enetc_get_ethtool_stats,
+ .get_rxnfc = enetc_get_rxnfc,
+ .set_rxnfc = enetc_set_rxnfc,
+ .get_rxfh_key_size = enetc_get_rxfh_key_size,
+ .get_rxfh_indir_size = enetc_get_rxfh_indir_size,
+ .get_rxfh = enetc_get_rxfh,
+ .set_rxfh = enetc_set_rxfh,
+ .get_ringparam = enetc_get_ringparam,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static const struct ethtool_ops enetc_vf_ethtool_ops = {
+ .get_regs_len = enetc_get_reglen,
+ .get_regs = enetc_get_regs,
+ .get_sset_count = enetc_get_sset_count,
+ .get_strings = enetc_get_strings,
+ .get_ethtool_stats = enetc_get_ethtool_stats,
+ .get_rxnfc = enetc_get_rxnfc,
+ .set_rxnfc = enetc_set_rxnfc,
+ .get_rxfh_indir_size = enetc_get_rxfh_indir_size,
+ .get_rxfh = enetc_get_rxfh,
+ .set_rxfh = enetc_set_rxfh,
+ .get_ringparam = enetc_get_ringparam,
+};
+
+void enetc_set_ethtool_ops(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ if (enetc_si_is_pf(priv->si))
+ ndev->ethtool_ops = &enetc_pf_ethtool_ops;
+ else
+ ndev->ethtool_ops = &enetc_vf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
new file mode 100644
index 000000000000..efa0b1a5ef4f
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -0,0 +1,532 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2017-2019 NXP */
+
+#include <linux/bitops.h>
+
+/* ENETC device IDs */
+#define ENETC_DEV_ID_PF 0xe100
+#define ENETC_DEV_ID_VF 0xef00
+
+/* ENETC register block BAR */
+#define ENETC_BAR_REGS 0
+
+/* SI regs, offset: 0h */
+#define ENETC_SIMR 0
+#define ENETC_SIMR_EN BIT(31)
+#define ENETC_SIMR_RSSE BIT(0)
+#define ENETC_SICTR0 0x18
+#define ENETC_SICTR1 0x1c
+#define ENETC_SIPCAPR0 0x20
+#define ENETC_SIPCAPR0_RSS BIT(8)
+#define ENETC_SIPCAPR1 0x24
+#define ENETC_SITGTGR 0x30
+#define ENETC_SIRBGCR 0x38
+/* cache attribute registers for transactions initiated by ENETC */
+#define ENETC_SICAR0 0x40
+#define ENETC_SICAR1 0x44
+#define ENETC_SICAR2 0x48
+/* rd snoop, no alloc
+ * wr snoop, no alloc, partial cache line update for BDs and full cache line
+ * update for data
+ */
+#define ENETC_SICAR_RD_COHERENT 0x2b2b0000
+#define ENETC_SICAR_WR_COHERENT 0x00006727
+#define ENETC_SICAR_MSI 0x00300030 /* rd/wr device, no snoop, no alloc */
+
+#define ENETC_SIPMAR0 0x80
+#define ENETC_SIPMAR1 0x84
+
+/* VF-PF Message passing */
+#define ENETC_DEFAULT_MSG_SIZE 1024 /* and max size */
+/* msg size encoding: default and max msg value of 1024B encoded as 0 */
+static inline u32 enetc_vsi_set_msize(u32 size)
+{
+ return size < ENETC_DEFAULT_MSG_SIZE ? size >> 5 : 0;
+}
+
+#define ENETC_PSIMSGRR 0x204
+#define ENETC_PSIMSGRR_MR_MASK GENMASK(2, 1)
+#define ENETC_PSIMSGRR_MR(n) BIT((n) + 1) /* n = VSI index */
+#define ENETC_PSIVMSGRCVAR0(n) (0x210 + (n) * 0x8) /* n = VSI index */
+#define ENETC_PSIVMSGRCVAR1(n) (0x214 + (n) * 0x8)
+
+#define ENETC_VSIMSGSR 0x204 /* RO */
+#define ENETC_VSIMSGSR_MB BIT(0)
+#define ENETC_VSIMSGSR_MS BIT(1)
+#define ENETC_VSIMSGSNDAR0 0x210
+#define ENETC_VSIMSGSNDAR1 0x214
+
+#define ENETC_SIMSGSR_SET_MC(val) ((val) << 16)
+#define ENETC_SIMSGSR_GET_MC(val) ((val) >> 16)
+
+/* SI statistics */
+#define ENETC_SIROCT 0x300
+#define ENETC_SIRFRM 0x308
+#define ENETC_SIRUCA 0x310
+#define ENETC_SIRMCA 0x318
+#define ENETC_SITOCT 0x320
+#define ENETC_SITFRM 0x328
+#define ENETC_SITUCA 0x330
+#define ENETC_SITMCA 0x338
+#define ENETC_RBDCR(n) (0x8180 + (n) * 0x200)
+
+/* Control BDR regs */
+#define ENETC_SICBDRMR 0x800
+#define ENETC_SICBDRSR 0x804 /* RO */
+#define ENETC_SICBDRBAR0 0x810
+#define ENETC_SICBDRBAR1 0x814
+#define ENETC_SICBDRPIR 0x818
+#define ENETC_SICBDRCIR 0x81c
+#define ENETC_SICBDRLENR 0x820
+
+#define ENETC_SICAPR0 0x900
+#define ENETC_SICAPR1 0x904
+
+#define ENETC_PSIIER 0xa00
+#define ENETC_PSIIER_MR_MASK GENMASK(2, 1)
+#define ENETC_PSIIDR 0xa08
+#define ENETC_SITXIDR 0xa18
+#define ENETC_SIRXIDR 0xa28
+#define ENETC_SIMSIVR 0xa30
+
+#define ENETC_SIMSITRV(n) (0xB00 + (n) * 0x4)
+#define ENETC_SIMSIRRV(n) (0xB80 + (n) * 0x4)
+
+#define ENETC_SIUEFDCR 0xe28
+
+#define ENETC_SIRFSCAPR 0x1200
+#define ENETC_SIRFSCAPR_GET_NUM_RFS(val) ((val) & 0x7f)
+#define ENETC_SIRSSCAPR 0x1600
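+/* RSS table size encoding: number of entries = 32 * 2^val */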
+#define ENETC_SIRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32)
+
+/* SI BDR sub-blocks, n = 0..7 */
+enum enetc_bdr_type {TX, RX};
+#define ENETC_BDR_OFF(i) ((i) * 0x200)
+#define ENETC_BDR(t, i, r) (0x8000 + (t) * 0x100 + ENETC_BDR_OFF(i) + (r))
+/* RX BDR reg offsets */
+#define ENETC_RBMR 0
+#define ENETC_RBMR_BDS BIT(2)
+#define ENETC_RBMR_VTE BIT(5)
+#define ENETC_RBMR_EN BIT(31)
+#define ENETC_RBSR 0x4
+#define ENETC_RBBSR 0x8
+#define ENETC_RBCIR 0xc
+#define ENETC_RBBAR0 0x10
+#define ENETC_RBBAR1 0x14
+#define ENETC_RBPIR 0x18
+#define ENETC_RBLENR 0x20
+#define ENETC_RBIER 0xa0
+#define ENETC_RBIER_RXTIE BIT(0)
+#define ENETC_RBIDR 0xa4
+#define ENETC_RBICIR0 0xa8
+#define ENETC_RBICIR0_ICEN BIT(31)
+
+/* TX BDR reg offsets */
+#define ENETC_TBMR 0
+#define ENETC_TBSR_BUSY BIT(0)
+#define ENETC_TBMR_VIH BIT(9)
+#define ENETC_TBMR_PRIO_MASK GENMASK(2, 0)
+#define ENETC_TBMR_PRIO_SET(val) (val)
+#define ENETC_TBMR_EN BIT(31)
+#define ENETC_TBSR 0x4
+#define ENETC_TBBAR0 0x10
+#define ENETC_TBBAR1 0x14
+#define ENETC_TBPIR 0x18
+#define ENETC_TBCIR 0x1c
+#define ENETC_TBCIR_IDX_MASK 0xffff
+#define ENETC_TBLENR 0x20
+#define ENETC_TBIER 0xa0
+#define ENETC_TBIER_TXTIE BIT(0)
+#define ENETC_TBIDR 0xa4
+#define ENETC_TBICIR0 0xa8
+#define ENETC_TBICIR0_ICEN BIT(31)
+
+#define ENETC_RTBLENR_LEN(n) ((n) & ~0x7)
+
+/* Port regs, offset: 1_0000h */
+#define ENETC_PORT_BASE 0x10000
+#define ENETC_PMR 0x0000
+#define ENETC_PMR_EN GENMASK(18, 16)
+#define ENETC_PSR 0x0004 /* RO */
+#define ENETC_PSIPMR 0x0018
+#define ENETC_PSIPMR_SET_UP(n) BIT(n) /* n = SI index */
+#define ENETC_PSIPMR_SET_MP(n) BIT((n) + 16)
+#define ENETC_PSIPVMR 0x001c
+#define ENETC_VLAN_PROMISC_MAP_ALL 0x7
+#define ENETC_PSIPVMR_SET_VP(simap) ((simap) & 0x7)
+#define ENETC_PSIPVMR_SET_VUTA(simap) (((simap) & 0x7) << 16)
+#define ENETC_PSIPMAR0(n) (0x0100 + (n) * 0x8) /* n = SI index */
+#define ENETC_PSIPMAR1(n) (0x0104 + (n) * 0x8)
+#define ENETC_PVCLCTR 0x0208
+#define ENETC_VLAN_TYPE_C BIT(0)
+#define ENETC_VLAN_TYPE_S BIT(1)
+#define ENETC_PVCLCTR_OVTPIDL(bmp) ((bmp) & 0xff) /* VLAN_TYPE */
+#define ENETC_PSIVLANR(n) (0x0240 + (n) * 4) /* n = SI index */
+#define ENETC_PSIVLAN_EN BIT(31)
+#define ENETC_PSIVLAN_SET_QOS(val) ((u32)(val) << 12)
+#define ENETC_PTXMBAR 0x0608
+#define ENETC_PCAPR0 0x0900
+#define ENETC_PCAPR0_RXBDR(val) ((val) >> 24)
+#define ENETC_PCAPR0_TXBDR(val) (((val) >> 16) & 0xff)
+#define ENETC_PCAPR1 0x0904
+#define ENETC_PSICFGR0(n) (0x0940 + (n) * 0xc) /* n = SI index */
+#define ENETC_PSICFGR0_SET_TXBDR(val) ((val) & 0xff)
+#define ENETC_PSICFGR0_SET_RXBDR(val) (((val) & 0xff) << 16)
+#define ENETC_PSICFGR0_VTE BIT(12)
+#define ENETC_PSICFGR0_SIVIE BIT(14)
+#define ENETC_PSICFGR0_ASE BIT(15)
+#define ENETC_PSICFGR0_SIVC(bmp) (((bmp) & 0xff) << 24) /* VLAN_TYPE */
+
+#define ENETC_PTCCBSR0(n) (0x1110 + (n) * 8) /* n = 0 to 7 */
+#define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7 */
+#define ENETC_RSSHASH_KEY_SIZE 40
+#define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */
+#define ENETC_PSIVLANFMR 0x1700
+#define ENETC_PSIVLANFMR_VS BIT(0)
+#define ENETC_PRFSMR 0x1800
+#define ENETC_PRFSMR_RFSE BIT(31)
+#define ENETC_PRFSCAPR 0x1804
+#define ENETC_PRFSCAPR_GET_NUM_RFS(val) ((((val) & 0xf) + 1) * 16)
+#define ENETC_PSIRFSCFGR(n) (0x1814 + (n) * 4) /* n = SI index */
+#define ENETC_PFPMR 0x1900
+#define ENETC_PFPMR_PMACE BIT(1)
+#define ENETC_PFPMR_MWLM BIT(0)
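+/* parts with the UCMCSWP erratum have the UC and MC hash filter registers swapped */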
+#define ENETC_PSIUMHFR0(n, err) (((err) ? 0x1d08 : 0x1d00) + (n) * 0x10)
+#define ENETC_PSIUMHFR1(n) (0x1d04 + (n) * 0x10)
+#define ENETC_PSIMMHFR0(n, err) (((err) ? 0x1d00 : 0x1d08) + (n) * 0x10)
+#define ENETC_PSIMMHFR1(n) (0x1d0c + (n) * 0x10)
+#define ENETC_PSIVHFR0(n) (0x1e00 + (n) * 8) /* n = SI index */
+#define ENETC_PSIVHFR1(n) (0x1e04 + (n) * 8) /* n = SI index */
+#define ENETC_MMCSR 0x1f00
+#define ENETC_MMCSR_ME BIT(16)
+#define ENETC_PTCMSDUR(n) (0x2020 + (n) * 4) /* n = TC index [0..7] */
+
+#define ENETC_PM0_CMD_CFG 0x8008
+#define ENETC_PM1_CMD_CFG 0x9008
+#define ENETC_PM0_TX_EN BIT(0)
+#define ENETC_PM0_RX_EN BIT(1)
+#define ENETC_PM0_PROMISC BIT(4)
+#define ENETC_PM0_CMD_XGLP BIT(10)
+#define ENETC_PM0_CMD_TXP BIT(11)
+#define ENETC_PM0_CMD_PHY_TX_EN BIT(15)
+#define ENETC_PM0_CMD_SFD BIT(21)
+#define ENETC_PM0_MAXFRM 0x8014
+#define ENETC_SET_TX_MTU(val) ((val) << 16)
+#define ENETC_SET_MAXFRM(val) ((val) & 0xffff)
+#define ENETC_PM0_IF_MODE 0x8300
+#define ENETC_PM0_IFM_RG BIT(2)
+#define ENETC_PM0_IFM_RLP (BIT(5) | BIT(11))
+#define ENETC_PM0_IFM_RGAUTO (BIT(15) | ENETC_PM0_IFM_RG | BIT(1))
+#define ENETC_PM0_IFM_XGMII BIT(12)
+
+/* MAC counters */
+#define ENETC_PM0_REOCT 0x8100
+#define ENETC_PM0_RALN 0x8110
+#define ENETC_PM0_RXPF 0x8118
+#define ENETC_PM0_RFRM 0x8120
+#define ENETC_PM0_RFCS 0x8128
+#define ENETC_PM0_RVLAN 0x8130
+#define ENETC_PM0_RERR 0x8138
+#define ENETC_PM0_RUCA 0x8140
+#define ENETC_PM0_RMCA 0x8148
+#define ENETC_PM0_RBCA 0x8150
+#define ENETC_PM0_RDRP 0x8158
+#define ENETC_PM0_RPKT 0x8160
+#define ENETC_PM0_RUND 0x8168
+#define ENETC_PM0_R64 0x8170
+#define ENETC_PM0_R127 0x8178
+#define ENETC_PM0_R255 0x8180
+#define ENETC_PM0_R511 0x8188
+#define ENETC_PM0_R1023 0x8190
+#define ENETC_PM0_R1518 0x8198
+#define ENETC_PM0_R1519X 0x81A0
+#define ENETC_PM0_ROVR 0x81A8
+#define ENETC_PM0_RJBR 0x81B0
+#define ENETC_PM0_RFRG 0x81B8
+#define ENETC_PM0_RCNP 0x81C0
+#define ENETC_PM0_RDRNTP 0x81C8
+#define ENETC_PM0_TEOCT 0x8200
+#define ENETC_PM0_TOCT 0x8208
+#define ENETC_PM0_TCRSE 0x8210
+#define ENETC_PM0_TXPF 0x8218
+#define ENETC_PM0_TFRM 0x8220
+#define ENETC_PM0_TFCS 0x8228
+#define ENETC_PM0_TVLAN 0x8230
+#define ENETC_PM0_TERR 0x8238
+#define ENETC_PM0_TUCA 0x8240
+#define ENETC_PM0_TMCA 0x8248
+#define ENETC_PM0_TBCA 0x8250
+#define ENETC_PM0_TPKT 0x8260
+#define ENETC_PM0_TUND 0x8268
+#define ENETC_PM0_T127 0x8278
+#define ENETC_PM0_T1023 0x8290
+#define ENETC_PM0_T1518 0x8298
+#define ENETC_PM0_TCNP 0x82C0
+#define ENETC_PM0_TDFR 0x82D0
+#define ENETC_PM0_TMCOL 0x82D8
+#define ENETC_PM0_TSCOL 0x82E0
+#define ENETC_PM0_TLCOL 0x82E8
+#define ENETC_PM0_TECOL 0x82F0
+
+/* Port counters */
+#define ENETC_PICDR(n) (0x0700 + (n) * 8) /* n = [0..3] */
+#define ENETC_PBFDSIR 0x0810
+#define ENETC_PFDMSAPR 0x0814
+#define ENETC_UFDMF 0x1680
+#define ENETC_MFDMF 0x1684
+#define ENETC_PUFDVFR 0x1780
+#define ENETC_PMFDVFR 0x1784
+#define ENETC_PBFDVFR 0x1788
+
+/* Global regs, offset: 2_0000h */
+#define ENETC_GLOBAL_BASE 0x20000
+#define ENETC_G_EIPBRR0 0x0bf8
+#define ENETC_G_EIPBRR1 0x0bfc
+#define ENETC_G_EPFBLPR(n) (0xd00 + 4 * (n))
+#define ENETC_G_EPFBLPR1_XGMII 0x80000000
+
+/* PCI device info */
+struct enetc_hw {
+ /* SI registers, used by all PCI functions */
+ void __iomem *reg;
+ /* Port registers, PF only */
+ void __iomem *port;
+ /* IP global registers, PF only */
+ void __iomem *global;
+};
+
+/* general register accessors */
+#define enetc_rd_reg(reg) ioread32((reg))
+#define enetc_wr_reg(reg, val) iowrite32((val), (reg))
+#ifdef ioread64
+#define enetc_rd_reg64(reg) ioread64((reg))
+#else
+/* fallback used to read out stats on 32-bit systems without ioread64 */
+static inline u64 enetc_rd_reg64(void __iomem *reg)
+{
+ u32 low, high, tmp;
+
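+ /* re-read until the high word is stable so low/high pair up as one consistent 64-bit value */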
+ do {
+ high = ioread32(reg + 4);
+ low = ioread32(reg);
+ tmp = ioread32(reg + 4);
+ } while (high != tmp);
+
+ return (u64)high << 32 | low;
+}
+#endif
+
+#define enetc_rd(hw, off) enetc_rd_reg((hw)->reg + (off))
+#define enetc_wr(hw, off, val) enetc_wr_reg((hw)->reg + (off), val)
+#define enetc_rd64(hw, off) enetc_rd_reg64((hw)->reg + (off))
+/* port register accessors - PF only */
+#define enetc_port_rd(hw, off) enetc_rd_reg((hw)->port + (off))
+#define enetc_port_wr(hw, off, val) enetc_wr_reg((hw)->port + (off), val)
+/* global register accessors - PF only */
+#define enetc_global_rd(hw, off) enetc_rd_reg((hw)->global + (off))
+#define enetc_global_wr(hw, off, val) enetc_wr_reg((hw)->global + (off), val)
+/* BDR register accessors, see ENETC_BDR() */
+#define enetc_bdr_rd(hw, t, n, off) \
+ enetc_rd(hw, ENETC_BDR(t, n, off))
+#define enetc_bdr_wr(hw, t, n, off, val) \
+ enetc_wr(hw, ENETC_BDR(t, n, off), val)
+#define enetc_txbdr_rd(hw, n, off) enetc_bdr_rd(hw, TX, n, off)
+#define enetc_rxbdr_rd(hw, n, off) enetc_bdr_rd(hw, RX, n, off)
+#define enetc_txbdr_wr(hw, n, off, val) \
+ enetc_bdr_wr(hw, TX, n, off, val)
+#define enetc_rxbdr_wr(hw, n, off, val) \
+ enetc_bdr_wr(hw, RX, n, off, val)
+
+/* Buffer Descriptors (BD) */
+union enetc_tx_bd {
+ struct {
+ __le64 addr;
+ __le16 buf_len;
+ __le16 frm_len;
+ union {
+ struct {
+ __le16 l3_csoff;
+ u8 l4_csoff;
+ u8 flags;
+ }; /* default layout */
+ __le32 lstatus;
+ };
+ };
+ struct {
+ __le32 tstamp;
+ __le16 tpid;
+ __le16 vid;
+ u8 reserved[6];
+ u8 e_flags;
+ u8 flags;
+ } ext; /* Tx BD extension */
+};
+
+#define ENETC_TXBD_FLAGS_L4CS BIT(0)
+#define ENETC_TXBD_FLAGS_W BIT(2)
+#define ENETC_TXBD_FLAGS_CSUM BIT(3)
+#define ENETC_TXBD_FLAGS_EX BIT(6)
+#define ENETC_TXBD_FLAGS_F BIT(7)
+
+static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd)
+{
+ memset(txbd, 0, sizeof(*txbd));
+}
+
+/* L3 csum flags */
+#define ENETC_TXBD_L3_IPCS BIT(7)
+#define ENETC_TXBD_L3_IPV6 BIT(15)
+
+#define ENETC_TXBD_L3_START_MASK GENMASK(6, 0)
+#define ENETC_TXBD_L3_SET_HSIZE(val) ((((val) >> 2) & 0x7f) << 8)
+
+/* Extension flags */
+#define ENETC_TXBD_E_FLAGS_VLAN_INS BIT(0)
+#define ENETC_TXBD_E_FLAGS_TWO_STEP_PTP BIT(2)
+
+static inline __le16 enetc_txbd_l3_csoff(int start, int hdr_sz, u16 l3_flags)
+{
+ return cpu_to_le16(l3_flags | ENETC_TXBD_L3_SET_HSIZE(hdr_sz) |
+ (start & ENETC_TXBD_L3_START_MASK));
+}
+
+/* L4 csum flags */
+#define ENETC_TXBD_L4_UDP BIT(5)
+#define ENETC_TXBD_L4_TCP BIT(6)
+
+union enetc_rx_bd {
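+ /* SW fills in the .w layout; HW writes completion status back using the .r layout */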
+ struct {
+ __le64 addr;
+ u8 reserved[8];
+ } w;
+ struct {
+ __le16 inet_csum;
+ __le16 parse_summary;
+ __le32 rss_hash;
+ __le16 buf_len;
+ __le16 vlan_opt;
+ union {
+ struct {
+ __le16 flags;
+ __le16 error;
+ };
+ __le32 lstatus;
+ };
+ } r;
+};
+
+#define ENETC_RXBD_LSTATUS_R BIT(30)
+#define ENETC_RXBD_LSTATUS_F BIT(31)
+#define ENETC_RXBD_ERR_MASK 0xff
+#define ENETC_RXBD_LSTATUS(flags) ((flags) << 16)
+#define ENETC_RXBD_FLAG_VLAN BIT(9)
+#define ENETC_RXBD_FLAG_TSTMP BIT(10)
+
+#define ENETC_MAC_ADDR_FILT_CNT 8 /* # of supported entries per port */
+#define ENETC_MAC_ADDR_FILT_RES 3 /* # of reserved entries at the beginning */
+#define ENETC_MAX_NUM_VFS 2
+
+struct enetc_cbd {
+ union {
+ struct {
+ __le32 addr[2];
+ __le32 opt[4];
+ };
+ __le32 data[6];
+ };
+ __le16 index;
+ __le16 length;
+ u8 cmd;
+ u8 cls;
+ u8 _res;
+ u8 status_flags;
+};
+
+#define ENETC_CBD_FLAGS_SF BIT(7) /* short format */
+#define ENETC_CBD_STATUS_MASK 0xf
+
+struct enetc_cmd_rfse {
+ u8 smac_h[6];
+ u8 smac_m[6];
+ u8 dmac_h[6];
+ u8 dmac_m[6];
+ u32 sip_h[4];
+ u32 sip_m[4];
+ u32 dip_h[4];
+ u32 dip_m[4];
+ u16 ethtype_h;
+ u16 ethtype_m;
+ u16 ethtype4_h;
+ u16 ethtype4_m;
+ u16 sport_h;
+ u16 sport_m;
+ u16 dport_h;
+ u16 dport_m;
+ u16 vlan_h;
+ u16 vlan_m;
+ u8 proto_h;
+ u8 proto_m;
+ u16 flags;
+ u16 result;
+ u16 mode;
+};
+
+#define ENETC_RFSE_EN BIT(15)
+#define ENETC_RFSE_MODE_BD 2
+
+static inline void enetc_get_primary_mac_addr(struct enetc_hw *hw, u8 *addr)
+{
+ *(u32 *)addr = __raw_readl(hw->reg + ENETC_SIPMAR0);
+ *(u16 *)(addr + 4) = __raw_readw(hw->reg + ENETC_SIPMAR1);
+}
+
+#define ENETC_SI_INT_IDX 0
+/* base index for Rx/Tx interrupts */
+#define ENETC_BDR_INT_BASE_IDX 1
+
+/* Messaging */
+
+/* Command completion status */
+enum enetc_msg_cmd_status {
+ ENETC_MSG_CMD_STATUS_OK,
+ ENETC_MSG_CMD_STATUS_FAIL
+};
+
+/* VSI-PSI command message types */
+enum enetc_msg_cmd_type {
+ ENETC_MSG_CMD_MNG_MAC = 1, /* manage MAC address */
+ ENETC_MSG_CMD_MNG_RX_MAC_FILTER,/* manage RX MAC table */
+ ENETC_MSG_CMD_MNG_RX_VLAN_FILTER /* manage RX VLAN table */
+};
+
+/* VSI-PSI command action types */
+enum enetc_msg_cmd_action_type {
+ ENETC_MSG_CMD_MNG_ADD = 1,
+ ENETC_MSG_CMD_MNG_REMOVE
+};
+
+/* PSI-VSI command header format */
+struct enetc_msg_cmd_header {
+ u16 type; /* command class type */
+ u16 id; /* denotes the specific required action */
+};
+
+/* Common H/W utility functions */
+
+static inline void enetc_enable_rxvlan(struct enetc_hw *hw, int si_idx,
+ bool en)
+{
+ u32 val = enetc_rxbdr_rd(hw, si_idx, ENETC_RBMR);
+
+ val = (val & ~ENETC_RBMR_VTE) | (en ? ENETC_RBMR_VTE : 0);
+ enetc_rxbdr_wr(hw, si_idx, ENETC_RBMR, val);
+}
+
+static inline void enetc_enable_txvlan(struct enetc_hw *hw, int si_idx,
+ bool en)
+{
+ u32 val = enetc_txbdr_rd(hw, si_idx, ENETC_TBMR);
+
+ val = (val & ~ENETC_TBMR_VIH) | (en ? ENETC_TBMR_VIH : 0);
+ enetc_txbdr_wr(hw, si_idx, ENETC_TBMR, val);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_msg.c b/drivers/net/ethernet/freescale/enetc/enetc_msg.c
new file mode 100644
index 000000000000..40d22ebe9224
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_msg.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include "enetc_pf.h"
+
+static void enetc_msg_disable_mr_int(struct enetc_hw *hw)
+{
+ u32 psiier = enetc_rd(hw, ENETC_PSIIER);
+
+ /* disable MR int source(s) */
+ enetc_wr(hw, ENETC_PSIIER, psiier & ~ENETC_PSIIER_MR_MASK);
+}
+
+static void enetc_msg_enable_mr_int(struct enetc_hw *hw)
+{
+ u32 psiier = enetc_rd(hw, ENETC_PSIIER);
+
+ enetc_wr(hw, ENETC_PSIIER, psiier | ENETC_PSIIER_MR_MASK);
+}
+
+static irqreturn_t enetc_msg_psi_msix(int irq, void *data)
+{
+ struct enetc_si *si = (struct enetc_si *)data;
+ struct enetc_pf *pf = enetc_si_priv(si);
+
+ enetc_msg_disable_mr_int(&si->hw);
+ schedule_work(&pf->msg_task);
+
+ return IRQ_HANDLED;
+}
+
+static void enetc_msg_task(struct work_struct *work)
+{
+ struct enetc_pf *pf = container_of(work, struct enetc_pf, msg_task);
+ struct enetc_hw *hw = &pf->si->hw;
+ unsigned long mr_mask;
+ int i;
+
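+ /* drain all pending VF message-received flags before re-arming the MR interrupt */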
+ for (;;) {
+ mr_mask = enetc_rd(hw, ENETC_PSIMSGRR) & ENETC_PSIMSGRR_MR_MASK;
+ if (!mr_mask) {
+ /* re-arm MR interrupts, w1c the IDR reg */
+ enetc_wr(hw, ENETC_PSIIDR, ENETC_PSIIER_MR_MASK);
+ enetc_msg_enable_mr_int(hw);
+ return;
+ }
+
+ for (i = 0; i < pf->num_vfs; i++) {
+ u32 psimsgrr;
+ u16 msg_code;
+
+ if (!(ENETC_PSIMSGRR_MR(i) & mr_mask))
+ continue;
+
+ enetc_msg_handle_rxmsg(pf, i, &msg_code);
+
+ psimsgrr = ENETC_SIMSGSR_SET_MC(msg_code);
+ psimsgrr |= ENETC_PSIMSGRR_MR(i); /* w1c */
+ enetc_wr(hw, ENETC_PSIMSGRR, psimsgrr);
+ }
+ }
+}
+
+/* Init */
+static int enetc_msg_alloc_mbx(struct enetc_si *si, int idx)
+{
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct device *dev = &si->pdev->dev;
+ struct enetc_hw *hw = &si->hw;
+ struct enetc_msg_swbd *msg;
+ u32 val;
+
+ msg = &pf->rxmsg[idx];
+ /* allocate and set receive buffer */
+ msg->size = ENETC_DEFAULT_MSG_SIZE;
+
+ msg->vaddr = dma_alloc_coherent(dev, msg->size, &msg->dma,
+ GFP_KERNEL);
+ if (!msg->vaddr) {
+ dev_err(dev, "msg: fail to alloc dma buffer of size: %d\n",
+ msg->size);
+ return -ENOMEM;
+ }
+
+ /* set buffer address, must be a multiple of 32 bytes */
+ val = lower_32_bits(msg->dma);
+ enetc_wr(hw, ENETC_PSIVMSGRCVAR0(idx), val);
+ val = upper_32_bits(msg->dma);
+ enetc_wr(hw, ENETC_PSIVMSGRCVAR1(idx), val);
+
+ return 0;
+}
+
+static void enetc_msg_free_mbx(struct enetc_si *si, int idx)
+{
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct enetc_hw *hw = &si->hw;
+ struct enetc_msg_swbd *msg;
+
+ msg = &pf->rxmsg[idx];
+ dma_free_coherent(&si->pdev->dev, msg->size, msg->vaddr, msg->dma);
+ memset(msg, 0, sizeof(*msg));
+
+ enetc_wr(hw, ENETC_PSIVMSGRCVAR0(idx), 0);
+ enetc_wr(hw, ENETC_PSIVMSGRCVAR1(idx), 0);
+}
+
+int enetc_msg_psi_init(struct enetc_pf *pf)
+{
+ struct enetc_si *si = pf->si;
+ int vector, i, err;
+
+ /* register message passing interrupt handler */
+ snprintf(pf->msg_int_name, sizeof(pf->msg_int_name), "%s-vfmsg",
+ si->ndev->name);
+ vector = pci_irq_vector(si->pdev, ENETC_SI_INT_IDX);
+ err = request_irq(vector, enetc_msg_psi_msix, 0, pf->msg_int_name, si);
+ if (err) {
+ dev_err(&si->pdev->dev,
+ "PSI messaging: request_irq() failed!\n");
+ return err;
+ }
+
+ /* set one IRQ entry for PSI message receive notification (SI int) */
+ enetc_wr(&si->hw, ENETC_SIMSIVR, ENETC_SI_INT_IDX);
+
+ /* initialize PSI mailbox */
+ INIT_WORK(&pf->msg_task, enetc_msg_task);
+
+ for (i = 0; i < pf->num_vfs; i++) {
+ err = enetc_msg_alloc_mbx(si, i);
+ if (err)
+ goto err_init_mbx;
+ }
+
+ /* enable MR interrupts */
+ enetc_msg_enable_mr_int(&si->hw);
+
+ return 0;
+
+err_init_mbx:
+ for (i--; i >= 0; i--)
+ enetc_msg_free_mbx(si, i);
+
+ free_irq(vector, si);
+
+ return err;
+}
+
+void enetc_msg_psi_free(struct enetc_pf *pf)
+{
+ struct enetc_si *si = pf->si;
+ int i;
+
+ cancel_work_sync(&pf->msg_task);
+
+ /* disable MR interrupts */
+ enetc_msg_disable_mr_int(&si->hw);
+
+ for (i = 0; i < pf->num_vfs; i++)
+ enetc_msg_free_mbx(si, i);
+
+ /* de-register message passing interrupt handler */
+ free_irq(pci_irq_vector(si->pdev, ENETC_SI_INT_IDX), si);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
new file mode 100644
index 000000000000..7d28f5e9b46b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -0,0 +1,931 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include "enetc_pf.h"
+
+#define ENETC_DRV_VER_MAJ 1
+#define ENETC_DRV_VER_MIN 0
+
+#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." \
+ __stringify(ENETC_DRV_VER_MIN)
+static const char enetc_drv_ver[] = ENETC_DRV_VER_STR;
+#define ENETC_DRV_NAME_STR "ENETC PF driver"
+static const char enetc_drv_name[] = ENETC_DRV_NAME_STR;
+
+static void enetc_pf_get_primary_mac_addr(struct enetc_hw *hw, int si, u8 *addr)
+{
+ u32 upper = __raw_readl(hw->port + ENETC_PSIPMAR0(si));
+ u16 lower = __raw_readw(hw->port + ENETC_PSIPMAR1(si));
+
+ *(u32 *)addr = upper;
+ *(u16 *)(addr + 4) = lower;
+}
+
+static void enetc_pf_set_primary_mac_addr(struct enetc_hw *hw, int si,
+ const u8 *addr)
+{
+ u32 upper = *(const u32 *)addr;
+ u16 lower = *(const u16 *)(addr + 4);
+
+ __raw_writel(upper, hw->port + ENETC_PSIPMAR0(si));
+ __raw_writew(lower, hw->port + ENETC_PSIPMAR1(si));
+}
+
+static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(ndev->dev_addr, saddr->sa_data, ndev->addr_len);
+ enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data);
+
+ return 0;
+}
+
+static void enetc_set_vlan_promisc(struct enetc_hw *hw, char si_map)
+{
+ u32 val = enetc_port_rd(hw, ENETC_PSIPVMR);
+
+ val &= ~ENETC_PSIPVMR_SET_VP(ENETC_VLAN_PROMISC_MAP_ALL);
+ enetc_port_wr(hw, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VP(si_map) | val);
+}
+
+static bool enetc_si_vlan_promisc_is_on(struct enetc_pf *pf, int si_idx)
+{
+ return pf->vlan_promisc_simap & BIT(si_idx);
+}
+
+static bool enetc_vlan_filter_is_on(struct enetc_pf *pf)
+{
+ return !bitmap_empty(pf->active_vlans, VLAN_N_VID);
+}
+
+static void enetc_enable_si_vlan_promisc(struct enetc_pf *pf, int si_idx)
+{
+ pf->vlan_promisc_simap |= BIT(si_idx);
+ enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap);
+}
+
+static void enetc_disable_si_vlan_promisc(struct enetc_pf *pf, int si_idx)
+{
+ pf->vlan_promisc_simap &= ~BIT(si_idx);
+ enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap);
+}
+
+static void enetc_set_isol_vlan(struct enetc_hw *hw, int si, u16 vlan, u8 qos)
+{
+ u32 val = 0;
+
+ if (vlan)
+ val = ENETC_PSIVLAN_EN | ENETC_PSIVLAN_SET_QOS(qos) | vlan;
+
+ enetc_port_wr(hw, ENETC_PSIVLANR(si), val);
+}
+
+static int enetc_mac_addr_hash_idx(const u8 *addr)
+{
+ u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
+ u64 mask = 0;
+ int res = 0;
+ int i;
+
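+ /* 6-bit hash: bit i is the parity (XOR) of address bits i, i + 6, ..., i + 42 */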
+ for (i = 0; i < 8; i++)
+ mask |= BIT_ULL(i * 6);
+
+ for (i = 0; i < 6; i++)
+ res |= (hweight64(fold & (mask << i)) & 0x1) << i;
+
+ return res;
+}
+
+static void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter)
+{
+ filter->mac_addr_cnt = 0;
+
+ bitmap_zero(filter->mac_hash_table,
+ ENETC_MADDR_HASH_TBL_SZ);
+}
+
+static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter,
+ const unsigned char *addr)
+{
+ /* add exact match addr */
+ ether_addr_copy(filter->mac_addr, addr);
+ filter->mac_addr_cnt++;
+}
+
+static void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
+ const unsigned char *addr)
+{
+ int idx = enetc_mac_addr_hash_idx(addr);
+
+ /* add hash table entry */
+ __set_bit(idx, filter->mac_hash_table);
+ filter->mac_addr_cnt++;
+}
+
+static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type)
+{
+ bool err = si->errata & ENETC_ERR_UCMCSWP;
+
+ if (type == UC) {
+ enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), 0);
+ enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), 0);
+ } else { /* MC */
+ enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), 0);
+ enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), 0);
+ }
+}
+
+static void enetc_set_mac_ht_flt(struct enetc_si *si, int si_idx, int type,
+ u32 *hash)
+{
+ bool err = si->errata & ENETC_ERR_UCMCSWP;
+
+ if (type == UC) {
+ enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), *hash);
+ enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), *(hash + 1));
+ } else { /* MC */
+ enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), *hash);
+ enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), *(hash + 1));
+ }
+}
+
+static void enetc_sync_mac_filters(struct enetc_pf *pf)
+{
+ struct enetc_mac_filter *f = pf->mac_filter;
+ struct enetc_si *si = pf->si;
+ int i, pos;
+
+ pos = ENETC_MAC_ADDR_FILT_RES;
+
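+ /* a single UC address can use the exact-match entry; anything else uses hash table filtering */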
+ for (i = 0; i < MADDR_TYPE; i++, f++) {
+ bool em = (f->mac_addr_cnt == 1) && (i == UC);
+ bool clear = !f->mac_addr_cnt;
+
+ if (clear) {
+ if (i == UC)
+ enetc_clear_mac_flt_entry(si, pos);
+
+ enetc_clear_mac_ht_flt(si, 0, i);
+ continue;
+ }
+
+ /* exact match filter */
+ if (em) {
+ int err;
+
+ enetc_clear_mac_ht_flt(si, 0, UC);
+
+ err = enetc_set_mac_flt_entry(si, pos, f->mac_addr,
+ BIT(0));
+ if (!err)
+ continue;
+
+ /* fallback to HT filtering */
+ dev_warn(&si->pdev->dev, "fallback to HT filt (%d)\n",
+ err);
+ }
+
+ /* hash table filter, clear EM filter for UC entries */
+ if (i == UC)
+ enetc_clear_mac_flt_entry(si, pos);
+
+ enetc_set_mac_ht_flt(si, 0, i, (u32 *)f->mac_hash_table);
+ }
+}
+
+static void enetc_pf_set_rx_mode(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct enetc_hw *hw = &priv->si->hw;
+ bool uprom = false, mprom = false;
+ struct enetc_mac_filter *filter;
+ struct netdev_hw_addr *ha;
+ u32 psipmr = 0;
+ bool em;
+
+ if (ndev->flags & IFF_PROMISC) {
+ /* enable promisc mode for SI0 (PF) */
+ psipmr = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
+ uprom = true;
+ mprom = true;
+ /* enable VLAN promisc mode for SI0 */
+ if (!enetc_si_vlan_promisc_is_on(pf, 0))
+ enetc_enable_si_vlan_promisc(pf, 0);
+
+ } else if (ndev->flags & IFF_ALLMULTI) {
+ /* enable multicast promisc mode for SI0 (PF) */
+ psipmr = ENETC_PSIPMR_SET_MP(0);
+ mprom = true;
+ }
+
+ /* first 2 filter entries belong to PF */
+ if (!uprom) {
+ /* Update unicast filters */
+ filter = &pf->mac_filter[UC];
+ enetc_reset_mac_addr_filter(filter);
+
+ em = (netdev_uc_count(ndev) == 1);
+ netdev_for_each_uc_addr(ha, ndev) {
+ if (em) {
+ enetc_add_mac_addr_em_filter(filter, ha->addr);
+ break;
+ }
+
+ enetc_add_mac_addr_ht_filter(filter, ha->addr);
+ }
+ }
+
+ if (!mprom) {
+ /* Update multicast filters */
+ filter = &pf->mac_filter[MC];
+ enetc_reset_mac_addr_filter(filter);
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ if (!is_multicast_ether_addr(ha->addr))
+ continue;
+
+ enetc_add_mac_addr_ht_filter(filter, ha->addr);
+ }
+ }
+
+ if (!uprom || !mprom)
+ /* update PF entries */
+ enetc_sync_mac_filters(pf);
+
+ psipmr |= enetc_port_rd(hw, ENETC_PSIPMR) &
+ ~(ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0));
+ enetc_port_wr(hw, ENETC_PSIPMR, psipmr);
+}
+
+static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx,
+ u32 *hash)
+{
+ enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), *hash);
+ enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), *(hash + 1));
+}
+
+static int enetc_vid_hash_idx(unsigned int vid)
+{
+ int res = 0;
+ int i;
+
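+ /* hash bit i is the parity of VID bits i and i + 6 */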
+ for (i = 0; i < 6; i++)
+ res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i;
+
+ return res;
+}
+
+static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash)
+{
+ int i;
+
+ if (rehash) {
+ bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE);
+
+ for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) {
+ int hidx = enetc_vid_hash_idx(i);
+
+ __set_bit(hidx, pf->vlan_ht_filter);
+ }
+ }
+
+ enetc_set_vlan_ht_filter(&pf->si->hw, 0, (u32 *)pf->vlan_ht_filter);
+}
+
+static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ int idx;
+
+ if (enetc_si_vlan_promisc_is_on(pf, 0))
+ enetc_disable_si_vlan_promisc(pf, 0);
+
+ __set_bit(vid, pf->active_vlans);
+
+ idx = enetc_vid_hash_idx(vid);
+ if (!__test_and_set_bit(idx, pf->vlan_ht_filter))
+ enetc_sync_vlan_ht_filter(pf, false);
+
+ return 0;
+}
+
+static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ __clear_bit(vid, pf->active_vlans);
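+ /* several VIDs can share a hash bit, so removing one requires a full rehash */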
+ enetc_sync_vlan_ht_filter(pf, true);
+
+ if (!enetc_vlan_filter_is_on(pf))
+ enetc_enable_si_vlan_promisc(pf, 0);
+
+ return 0;
+}
+
+static void enetc_set_loopback(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 reg;
+
+ reg = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+ if (reg & ENETC_PMO_IFM_RG) {
+ /* RGMII mode */
+ reg = (reg & ~ENETC_PM0_IFM_RLP) |
+ (en ? ENETC_PM0_IFM_RLP : 0);
+ enetc_port_wr(hw, ENETC_PM0_IF_MODE, reg);
+ } else {
+ /* assume SGMII mode */
+ reg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+ reg = (reg & ~ENETC_PM0_CMD_XGLP) |
+ (en ? ENETC_PM0_CMD_XGLP : 0);
+ reg = (reg & ~ENETC_PM0_CMD_PHY_TX_EN) |
+ (en ? ENETC_PM0_CMD_PHY_TX_EN : 0);
+ enetc_port_wr(hw, ENETC_PM0_CMD_CFG, reg);
+ enetc_port_wr(hw, ENETC_PM1_CMD_CFG, reg);
+ }
+}
+
+static int enetc_pf_set_vf_mac(struct net_device *ndev, int vf, u8 *mac)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct enetc_vf_state *vf_state;
+
+ if (vf >= pf->total_vfs)
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(mac))
+ return -EADDRNOTAVAIL;
+
+ vf_state = &pf->vf_state[vf];
+ vf_state->flags |= ENETC_VF_FLAG_PF_SET_MAC;
+ enetc_pf_set_primary_mac_addr(&priv->si->hw, vf + 1, mac);
+ return 0;
+}
+
+static int enetc_pf_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan,
+ u8 qos, __be16 proto)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ if (priv->si->errata & ENETC_ERR_VLAN_ISOL)
+ return -EOPNOTSUPP;
+
+ if (vf >= pf->total_vfs)
+ return -EINVAL;
+
+ if (proto != htons(ETH_P_8021Q))
+ /* only C-tags supported for now */
+ return -EPROTONOSUPPORT;
+
+ enetc_set_isol_vlan(&priv->si->hw, vf + 1, vlan, qos);
+ return 0;
+}
+
+static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ u32 cfgr;
+
+ if (vf >= pf->total_vfs)
+ return -EINVAL;
+
+ cfgr = enetc_port_rd(&priv->si->hw, ENETC_PSICFGR0(vf + 1));
+ cfgr = (cfgr & ~ENETC_PSICFGR0_ASE) | (en ? ENETC_PSICFGR0_ASE : 0);
+ enetc_port_wr(&priv->si->hw, ENETC_PSICFGR0(vf + 1), cfgr);
+
+ return 0;
+}
+
+static void enetc_port_setup_primary_mac_address(struct enetc_si *si)
+{
+ unsigned char mac_addr[MAX_ADDR_LEN];
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct enetc_hw *hw = &si->hw;
+ int i;
+
+ /* check MAC addresses for PF and all VFs; if any is 0, set it to a random one */
+ for (i = 0; i < pf->total_vfs + 1; i++) {
+ enetc_pf_get_primary_mac_addr(hw, i, mac_addr);
+ if (!is_zero_ether_addr(mac_addr))
+ continue;
+ eth_random_addr(mac_addr);
+ dev_info(&si->pdev->dev, "no MAC address specified for SI%d, using %pM\n",
+ i, mac_addr);
+ enetc_pf_set_primary_mac_addr(hw, i, mac_addr);
+ }
+}
+
+static void enetc_port_assign_rfs_entries(struct enetc_si *si)
+{
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct enetc_hw *hw = &si->hw;
+ int num_entries, vf_entries, i;
+ u32 val;
+
+ /* split RFS entries between functions */
+ val = enetc_port_rd(hw, ENETC_PRFSCAPR);
+ num_entries = ENETC_PRFSCAPR_GET_NUM_RFS(val);
+ vf_entries = num_entries / (pf->total_vfs + 1);
+
+ for (i = 0; i < pf->total_vfs; i++)
+ enetc_port_wr(hw, ENETC_PSIRFSCFGR(i + 1), vf_entries);
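+ /* SI0 (the PF) takes the remainder left over from the even split */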
+ enetc_port_wr(hw, ENETC_PSIRFSCFGR(0),
+ num_entries - vf_entries * pf->total_vfs);
+
+ /* enable RFS on port */
+ enetc_port_wr(hw, ENETC_PRFSMR, ENETC_PRFSMR_RFSE);
+}
+
+static void enetc_port_si_configure(struct enetc_si *si)
+{
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct enetc_hw *hw = &si->hw;
+ int num_rings, i;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PCAPR0);
+ num_rings = min(ENETC_PCAPR0_RXBDR(val), ENETC_PCAPR0_TXBDR(val));
+
+ val = ENETC_PSICFGR0_SET_TXBDR(ENETC_PF_NUM_RINGS);
+ val |= ENETC_PSICFGR0_SET_RXBDR(ENETC_PF_NUM_RINGS);
+
+ if (unlikely(num_rings < ENETC_PF_NUM_RINGS)) {
+ val = ENETC_PSICFGR0_SET_TXBDR(num_rings);
+ val |= ENETC_PSICFGR0_SET_RXBDR(num_rings);
+
+ dev_warn(&si->pdev->dev, "Found %d rings, expected %d!\n",
+ num_rings, ENETC_PF_NUM_RINGS);
+
+ num_rings = 0;
+ }
+
+ /* Add default one-time settings for SI0 (PF) */
+ val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+
+ enetc_port_wr(hw, ENETC_PSICFGR0(0), val);
+
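+ /* rings remaining after the PF's share are split evenly among the VFs */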
+ if (num_rings)
+ num_rings -= ENETC_PF_NUM_RINGS;
+
+ /* Configure the SIs for each available VF */
+ val = ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+ val |= ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE;
+
+ if (num_rings) {
+ num_rings /= pf->total_vfs;
+ val |= ENETC_PSICFGR0_SET_TXBDR(num_rings);
+ val |= ENETC_PSICFGR0_SET_RXBDR(num_rings);
+ }
+
+ for (i = 0; i < pf->total_vfs; i++)
+ enetc_port_wr(hw, ENETC_PSICFGR0(i + 1), val);
+
+ /* Port level VLAN settings */
+ val = ENETC_PVCLCTR_OVTPIDL(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+ enetc_port_wr(hw, ENETC_PVCLCTR, val);
+ /* use outer tag for VLAN filtering */
+ enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
+}
+
+static void enetc_configure_port_mac(struct enetc_hw *hw)
+{
+ enetc_port_wr(hw, ENETC_PM0_MAXFRM,
+ ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
+
+ enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
+ enetc_port_wr(hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
+
+ enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
+ ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
+ ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
+
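+ /* mirror the MAC configuration to the preemptible MAC (PM1) */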
+ enetc_port_wr(hw, ENETC_PM1_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
+ ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
+ ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
+ /* set auto-speed for RGMII */
+ if (enetc_port_rd(hw, ENETC_PM0_IF_MODE) & ENETC_PM0_IFM_RG)
+ enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_RGAUTO);
+ if (enetc_global_rd(hw, ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII)
+ enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_XGMII);
+}
+
+static void enetc_configure_port_pmac(struct enetc_hw *hw)
+{
+ u32 temp;
+
+ /* Set pMAC step lock */
+ temp = enetc_port_rd(hw, ENETC_PFPMR);
+ enetc_port_wr(hw, ENETC_PFPMR,
+ temp | ENETC_PFPMR_PMACE | ENETC_PFPMR_MWLM);
+
+ temp = enetc_port_rd(hw, ENETC_MMCSR);
+ enetc_port_wr(hw, ENETC_MMCSR, temp | ENETC_MMCSR_ME);
+}
+
+static void enetc_configure_port(struct enetc_pf *pf)
+{
+ u8 hash_key[ENETC_RSSHASH_KEY_SIZE];
+ struct enetc_hw *hw = &pf->si->hw;
+
+ enetc_configure_port_pmac(hw);
+
+ enetc_configure_port_mac(hw);
+
+ enetc_port_si_configure(pf->si);
+
+ /* set up hash key */
+ get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
+ enetc_set_rss_key(hw, hash_key);
+
+ /* split up RFS entries */
+ enetc_port_assign_rfs_entries(pf->si);
+
+ /* fix-up primary MAC addresses, if not set already */
+ enetc_port_setup_primary_mac_address(pf->si);
+
+ /* enforce VLAN promisc mode for all SIs */
+ pf->vlan_promisc_simap = ENETC_VLAN_PROMISC_MAP_ALL;
+ enetc_set_vlan_promisc(hw, pf->vlan_promisc_simap);
+
+ enetc_port_wr(hw, ENETC_PSIPMR, 0);
+
+ /* enable port */
+ enetc_port_wr(hw, ENETC_PMR, ENETC_PMR_EN);
+}
+
+/* Messaging */
+static u16 enetc_msg_pf_set_vf_primary_mac_addr(struct enetc_pf *pf,
+ int vf_id)
+{
+ struct enetc_vf_state *vf_state = &pf->vf_state[vf_id];
+ struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id];
+ struct enetc_msg_cmd_set_primary_mac *cmd;
+ struct device *dev = &pf->si->pdev->dev;
+ u16 cmd_id;
+ char *addr;
+
+ cmd = (struct enetc_msg_cmd_set_primary_mac *)msg->vaddr;
+ cmd_id = cmd->header.id;
+ if (cmd_id != ENETC_MSG_CMD_MNG_ADD)
+ return ENETC_MSG_CMD_STATUS_FAIL;
+
+ addr = cmd->mac.sa_data;
+ if (vf_state->flags & ENETC_VF_FLAG_PF_SET_MAC)
+ dev_warn(dev, "Attempt to override PF set mac addr for VF%d\n",
+ vf_id);
+ else
+ enetc_pf_set_primary_mac_addr(&pf->si->hw, vf_id + 1, addr);
+
+ return ENETC_MSG_CMD_STATUS_OK;
+}
+
+void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int vf_id, u16 *status)
+{
+ struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id];
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_msg_cmd_header *cmd_hdr;
+ u16 cmd_type;
+
+ *status = ENETC_MSG_CMD_STATUS_OK;
+ cmd_hdr = (struct enetc_msg_cmd_header *)msg->vaddr;
+ cmd_type = cmd_hdr->type;
+
+ switch (cmd_type) {
+ case ENETC_MSG_CMD_MNG_MAC:
+ *status = enetc_msg_pf_set_vf_primary_mac_addr(pf, vf_id);
+ break;
+ default:
+ dev_err(dev, "command not supported (cmd_type: 0x%x)\n",
+ cmd_type);
+ }
+}
+
+#ifdef CONFIG_PCI_IOV
+static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct enetc_si *si = pci_get_drvdata(pdev);
+ struct enetc_pf *pf = enetc_si_priv(si);
+ int err;
+
+ if (!num_vfs) {
+ enetc_msg_psi_free(pf);
+ kfree(pf->vf_state);
+ pf->num_vfs = 0;
+ pci_disable_sriov(pdev);
+ } else {
+ pf->num_vfs = num_vfs;
+
+ pf->vf_state = kcalloc(num_vfs, sizeof(struct enetc_vf_state),
+ GFP_KERNEL);
+ if (!pf->vf_state) {
+ pf->num_vfs = 0;
+ return -ENOMEM;
+ }
+
+ err = enetc_msg_psi_init(pf);
+ if (err) {
+ dev_err(&pdev->dev, "enetc_msg_psi_init (%d)\n", err);
+ goto err_msg_psi;
+ }
+
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_sriov err %d\n", err);
+ goto err_en_sriov;
+ }
+ }
+
+ return num_vfs;
+
+err_en_sriov:
+ enetc_msg_psi_free(pf);
+err_msg_psi:
+ kfree(pf->vf_state);
+ pf->num_vfs = 0;
+
+ return err;
+}
+#else
+#define enetc_sriov_configure(pdev, num_vfs) (void)0
+#endif
+
+static int enetc_pf_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = ndev->features ^ features;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+ enetc_enable_rxvlan(&priv->si->hw, 0,
+ !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_TX)
+ enetc_enable_txvlan(&priv->si->hw, 0,
+ !!(features & NETIF_F_HW_VLAN_CTAG_TX));
+
+ if (changed & NETIF_F_LOOPBACK)
+ enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
+
+ return enetc_set_features(ndev, features);
+}
+
+static const struct net_device_ops enetc_ndev_ops = {
+ .ndo_open = enetc_open,
+ .ndo_stop = enetc_close,
+ .ndo_start_xmit = enetc_xmit,
+ .ndo_get_stats = enetc_get_stats,
+ .ndo_set_mac_address = enetc_pf_set_mac_addr,
+ .ndo_set_rx_mode = enetc_pf_set_rx_mode,
+ .ndo_vlan_rx_add_vid = enetc_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = enetc_vlan_rx_del_vid,
+ .ndo_set_vf_mac = enetc_pf_set_vf_mac,
+ .ndo_set_vf_vlan = enetc_pf_set_vf_vlan,
+ .ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk,
+ .ndo_set_features = enetc_pf_set_features,
+};
+
+static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+ const struct net_device_ops *ndev_ops)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ SET_NETDEV_DEV(ndev, &si->pdev->dev);
+ priv->ndev = ndev;
+ priv->si = si;
+ priv->dev = &si->pdev->dev;
+ si->ndev = ndev;
+
+ priv->msg_enable = (NETIF_MSG_WOL << 1) - 1;
+ ndev->netdev_ops = ndev_ops;
+ enetc_set_ethtool_ops(ndev);
+ ndev->watchdog_timeo = 5 * HZ;
+ ndev->max_mtu = ENETC_MAX_MTU;
+
+ ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_LOOPBACK;
+ ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
+ NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (si->num_rss)
+ ndev->hw_features |= NETIF_F_RXHASH;
+
+ if (si->errata & ENETC_ERR_TXCSUM) {
+ ndev->hw_features &= ~NETIF_F_HW_CSUM;
+ ndev->features &= ~NETIF_F_HW_CSUM;
+ }
+
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* pick up primary MAC address from SI */
+ enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
+}
+
+static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
+{
+ struct device_node *np = priv->dev->of_node;
+ int err;
+
+ if (!np) {
+ dev_err(priv->dev, "missing ENETC port node\n");
+ return -ENODEV;
+ }
+
+ priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!priv->phy_node) {
+ if (!of_phy_is_fixed_link(np)) {
+ dev_err(priv->dev, "PHY not specified\n");
+ return -ENODEV;
+ }
+
+ err = of_phy_register_fixed_link(np);
+ if (err < 0) {
+ dev_err(priv->dev, "fixed link registration failed\n");
+ return err;
+ }
+
+ priv->phy_node = of_node_get(np);
+ }
+
+ priv->if_mode = of_get_phy_mode(np);
+ if (priv->if_mode < 0) {
+ dev_err(priv->dev, "missing phy type\n");
+ of_node_put(priv->phy_node);
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void enetc_of_put_phy(struct enetc_ndev_priv *priv)
+{
+ struct device_node *np = priv->dev->of_node;
+
+ if (np && of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ if (priv->phy_node)
+ of_node_put(priv->phy_node);
+}
+
+static int enetc_pf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct enetc_ndev_priv *priv;
+ struct net_device *ndev;
+ struct enetc_si *si;
+ struct enetc_pf *pf;
+ int err;
+
+ if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
+ dev_info(&pdev->dev, "device is disabled, skipping\n");
+ return -ENODEV;
+ }
+
+ err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
+ if (err) {
+ dev_err(&pdev->dev, "PCI probing failed\n");
+ return err;
+ }
+
+ si = pci_get_drvdata(pdev);
+ if (!si->hw.port || !si->hw.global) {
+ err = -ENODEV;
+ dev_err(&pdev->dev, "could not map PF space, probing a VF?\n");
+ goto err_map_pf_space;
+ }
+
+ pf = enetc_si_priv(si);
+ pf->si = si;
+ pf->total_vfs = pci_sriov_get_totalvfs(pdev);
+
+ enetc_configure_port(pf);
+
+ enetc_get_si_caps(si);
+
+ ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS);
+ if (!ndev) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "netdev creation failed\n");
+ goto err_alloc_netdev;
+ }
+
+ enetc_pf_netdev_setup(si, ndev, &enetc_ndev_ops);
+
+ priv = netdev_priv(ndev);
+
+ enetc_init_si_rings_params(priv);
+
+ err = enetc_alloc_si_resources(priv);
+ if (err) {
+ dev_err(&pdev->dev, "SI resource alloc failed\n");
+ goto err_alloc_si_res;
+ }
+
+ err = enetc_alloc_msix(priv);
+ if (err) {
+ dev_err(&pdev->dev, "MSIX alloc failed\n");
+ goto err_alloc_msix;
+ }
+
+ err = enetc_of_get_phy(priv);
+ if (err)
+ dev_warn(&pdev->dev, "Fallback to PHY-less operation\n");
+
+ err = register_netdev(ndev);
+ if (err)
+ goto err_reg_netdev;
+
+ netif_carrier_off(ndev);
+
+ netif_info(priv, probe, ndev, "%s v%s\n",
+ enetc_drv_name, enetc_drv_ver);
+
+ return 0;
+
+err_reg_netdev:
+ enetc_of_put_phy(priv);
+ enetc_free_msix(priv);
+err_alloc_msix:
+ enetc_free_si_resources(priv);
+err_alloc_si_res:
+ si->ndev = NULL;
+ free_netdev(ndev);
+err_alloc_netdev:
+err_map_pf_space:
+ enetc_pci_remove(pdev);
+
+ return err;
+}
+
+static void enetc_pf_remove(struct pci_dev *pdev)
+{
+ struct enetc_si *si = pci_get_drvdata(pdev);
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct enetc_ndev_priv *priv;
+
+ if (pf->num_vfs)
+ enetc_sriov_configure(pdev, 0);
+
+ priv = netdev_priv(si->ndev);
+ netif_info(priv, drv, si->ndev, "%s v%s remove\n",
+ enetc_drv_name, enetc_drv_ver);
+
+ unregister_netdev(si->ndev);
+
+ enetc_of_put_phy(priv);
+
+ enetc_free_msix(priv);
+
+ enetc_free_si_resources(priv);
+
+ free_netdev(si->ndev);
+
+ enetc_pci_remove(pdev);
+}
+
+static const struct pci_device_id enetc_pf_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF) },
+ { 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_pf_id_table);
+
+static struct pci_driver enetc_pf_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = enetc_pf_id_table,
+ .probe = enetc_pf_probe,
+ .remove = enetc_pf_remove,
+#ifdef CONFIG_PCI_IOV
+ .sriov_configure = enetc_sriov_configure,
+#endif
+};
+module_pci_driver(enetc_pf_driver);
+
+MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(ENETC_DRV_VER_STR);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
new file mode 100644
index 000000000000..2061ae5ccba0
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2017-2019 NXP */
+
+#include "enetc.h"
+
+#define ENETC_PF_NUM_RINGS 8
+
+enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
+#define ENETC_MAX_NUM_MAC_FLT ((ENETC_MAX_NUM_VFS + 1) * MADDR_TYPE)
+
+#define ENETC_MADDR_HASH_TBL_SZ 64
+struct enetc_mac_filter {
+ union {
+ char mac_addr[ETH_ALEN];
+ DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ);
+ };
+ int mac_addr_cnt;
+};
+
+#define ENETC_VLAN_HT_SIZE 64
+
+enum enetc_vf_flags {
+ ENETC_VF_FLAG_PF_SET_MAC = BIT(0),
+};
+
+struct enetc_vf_state {
+ enum enetc_vf_flags flags;
+};
+
+struct enetc_pf {
+ struct enetc_si *si;
+ int num_vfs; /* number of active VFs, after sriov_init */
+ int total_vfs; /* max number of VFs, set for PF at probe */
+ struct enetc_vf_state *vf_state;
+
+ struct enetc_mac_filter mac_filter[ENETC_MAX_NUM_MAC_FLT];
+
+ struct enetc_msg_swbd rxmsg[ENETC_MAX_NUM_VFS];
+ struct work_struct msg_task;
+ char msg_int_name[ENETC_INT_NAME_MAX];
+
+ char vlan_promisc_simap; /* bitmap of SIs in VLAN promisc mode */
+ DECLARE_BITMAP(vlan_ht_filter, ENETC_VLAN_HT_SIZE);
+ DECLARE_BITMAP(active_vlans, VLAN_N_VID);
+};
+
+int enetc_msg_psi_init(struct enetc_pf *pf);
+void enetc_msg_psi_free(struct enetc_pf *pf);
+void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int mbox_id, u16 *status);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
new file mode 100644
index 000000000000..64bebee9f52a
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include <linux/module.h>
+#include "enetc.h"
+
+#define ENETC_DRV_VER_MAJ 1
+#define ENETC_DRV_VER_MIN 0
+
+#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." \
+ __stringify(ENETC_DRV_VER_MIN)
+static const char enetc_drv_ver[] = ENETC_DRV_VER_STR;
+#define ENETC_DRV_NAME_STR "ENETC VF driver"
+static const char enetc_drv_name[] = ENETC_DRV_NAME_STR;
+
+/* Messaging */
+static void enetc_msg_vsi_write_msg(struct enetc_hw *hw,
+ struct enetc_msg_swbd *msg)
+{
+ u32 val;
+
+ val = enetc_vsi_set_msize(msg->size) | lower_32_bits(msg->dma);
+ enetc_wr(hw, ENETC_VSIMSGSNDAR1, upper_32_bits(msg->dma));
+ enetc_wr(hw, ENETC_VSIMSGSNDAR0, val);
+}
+
+static int enetc_msg_vsi_send(struct enetc_si *si, struct enetc_msg_swbd *msg)
+{
+ int timeout = 100;
+ u32 vsimsgsr;
+
+ enetc_msg_vsi_write_msg(&si->hw, msg);
+
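+ /* poll until HW clears the busy flag, i.e. the message has been consumed */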
+ do {
+ vsimsgsr = enetc_rd(&si->hw, ENETC_VSIMSGSR);
+ if (!(vsimsgsr & ENETC_VSIMSGSR_MB))
+ break;
+
+ usleep_range(1000, 2000);
+ } while (--timeout);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ /* check for message delivery error */
+ if (vsimsgsr & ENETC_VSIMSGSR_MS) {
+ dev_err(&si->pdev->dev, "VSI command execute error: %d\n",
+ ENETC_SIMSGSR_GET_MC(vsimsgsr));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int enetc_msg_vsi_set_primary_mac_addr(struct enetc_ndev_priv *priv,
+ struct sockaddr *saddr)
+{
+ struct enetc_msg_cmd_set_primary_mac *cmd;
+ struct enetc_msg_swbd msg;
+ int err;
+
+ msg.size = ALIGN(sizeof(struct enetc_msg_cmd_set_primary_mac), 64);
+ msg.vaddr = dma_alloc_coherent(priv->dev, msg.size, &msg.dma,
+ GFP_KERNEL);
+ if (!msg.vaddr) {
+ dev_err(priv->dev, "Failed to alloc Tx msg (size: %d)\n",
+ msg.size);
+ return -ENOMEM;
+ }
+
+ cmd = (struct enetc_msg_cmd_set_primary_mac *)msg.vaddr;
+ cmd->header.type = ENETC_MSG_CMD_MNG_MAC;
+ cmd->header.id = ENETC_MSG_CMD_MNG_ADD;
+ memcpy(&cmd->mac, saddr, sizeof(struct sockaddr));
+
+ /* send the command and wait */
+ err = enetc_msg_vsi_send(priv->si, &msg);
+
+ dma_free_coherent(priv->dev, msg.size, msg.vaddr, msg.dma);
+
+ return err;
+}
+
+static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ return enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
+}
+
+static int enetc_vf_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ return enetc_set_features(ndev, features);
+}
+
+/* Probing / Init */
+static const struct net_device_ops enetc_ndev_ops = {
+ .ndo_open = enetc_open,
+ .ndo_stop = enetc_close,
+ .ndo_start_xmit = enetc_xmit,
+ .ndo_get_stats = enetc_get_stats,
+ .ndo_set_mac_address = enetc_vf_set_mac_addr,
+ .ndo_set_features = enetc_vf_set_features,
+};
+
+static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+ const struct net_device_ops *ndev_ops)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ SET_NETDEV_DEV(ndev, &si->pdev->dev);
+ priv->ndev = ndev;
+ priv->si = si;
+ priv->dev = &si->pdev->dev;
+ si->ndev = ndev;
+
+ priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
+ ndev->netdev_ops = ndev_ops;
+ enetc_set_ethtool_ops(ndev);
+ ndev->watchdog_timeo = 5 * HZ;
+ ndev->max_mtu = ENETC_MAX_MTU;
+
+ ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
+ ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
+ NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
+
+ if (si->num_rss)
+ ndev->hw_features |= NETIF_F_RXHASH;
+
+ if (si->errata & ENETC_ERR_TXCSUM) {
+ ndev->hw_features &= ~NETIF_F_HW_CSUM;
+ ndev->features &= ~NETIF_F_HW_CSUM;
+ }
+
+ /* pick up primary MAC address from SI */
+ enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
+}
+
+static int enetc_vf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct enetc_ndev_priv *priv;
+ struct net_device *ndev;
+ struct enetc_si *si;
+ int err;
+
+ err = enetc_pci_probe(pdev, KBUILD_MODNAME, 0);
+ if (err) {
+ dev_err(&pdev->dev, "PCI probing failed\n");
+ return err;
+ }
+
+ si = pci_get_drvdata(pdev);
+
+ enetc_get_si_caps(si);
+
+ ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS);
+ if (!ndev) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "netdev creation failed\n");
+ goto err_alloc_netdev;
+ }
+
+ enetc_vf_netdev_setup(si, ndev, &enetc_ndev_ops);
+
+ priv = netdev_priv(ndev);
+
+ enetc_init_si_rings_params(priv);
+
+ err = enetc_alloc_si_resources(priv);
+ if (err) {
+ dev_err(&pdev->dev, "SI resource alloc failed\n");
+ goto err_alloc_si_res;
+ }
+
+ err = enetc_alloc_msix(priv);
+ if (err) {
+ dev_err(&pdev->dev, "MSIX alloc failed\n");
+ goto err_alloc_msix;
+ }
+
+ err = register_netdev(ndev);
+ if (err)
+ goto err_reg_netdev;
+
+ netif_carrier_off(ndev);
+
+ netif_info(priv, probe, ndev, "%s v%s\n",
+ enetc_drv_name, enetc_drv_ver);
+
+ return 0;
+
+err_reg_netdev:
+ enetc_free_msix(priv);
+err_alloc_msix:
+ enetc_free_si_resources(priv);
+err_alloc_si_res:
+ si->ndev = NULL;
+ free_netdev(ndev);
+err_alloc_netdev:
+ enetc_pci_remove(pdev);
+
+ return err;
+}
+
+static void enetc_vf_remove(struct pci_dev *pdev)
+{
+ struct enetc_si *si = pci_get_drvdata(pdev);
+ struct enetc_ndev_priv *priv;
+
+ priv = netdev_priv(si->ndev);
+ netif_info(priv, drv, si->ndev, "%s v%s remove\n",
+ enetc_drv_name, enetc_drv_ver);
+ unregister_netdev(si->ndev);
+
+ enetc_free_msix(priv);
+
+ enetc_free_si_resources(priv);
+
+ free_netdev(si->ndev);
+
+ enetc_pci_remove(pdev);
+}
+
+static const struct pci_device_id enetc_vf_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
+ { 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_vf_id_table);
+
+static struct pci_driver enetc_vf_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = enetc_vf_id_table,
+ .probe = enetc_vf_probe,
+ .remove = enetc_vf_remove,
+};
+module_pci_driver(enetc_vf_driver);
+
+MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(ENETC_DRV_VER_STR);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 5d64519b9b1d..6bf346c11b25 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -788,8 +788,9 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
int ring_num = hns_rcb_get_ring_num(dsaf_dev);
rcb_common =
- devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
- ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL);
+ devm_kzalloc(dsaf_dev->dev,
+ struct_size(rcb_common, ring_pair_cb, ring_num),
+ GFP_KERNEL);
if (!rcb_common) {
dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 36eab37d8a40..585800e634e6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -124,6 +124,7 @@ enum hnae3_reset_notify_type {
HNAE3_DOWN_CLIENT,
HNAE3_INIT_CLIENT,
HNAE3_UNINIT_CLIENT,
+ HNAE3_RESTORE_CLIENT,
};
enum hnae3_reset_type {
@@ -432,7 +433,8 @@ struct hnae3_ae_ops {
struct ethtool_channels *ch);
void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
u16 *alloc_tqps, u16 *max_rss_size);
- int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num);
+ int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num,
+ bool rxfh_configured);
void (*get_flowctrl_adv)(struct hnae3_handle *handle,
u32 *flowctrl_adv);
int (*set_led_id)(struct hnae3_handle *handle,
@@ -475,7 +477,6 @@ struct hnae3_dcb_ops {
u8 (*getdcbx)(struct hnae3_handle *);
u8 (*setdcbx)(struct hnae3_handle *, u8);
- int (*map_update)(struct hnae3_handle *);
int (*setup_tc)(struct hnae3_handle *, u8, u8 *);
};
@@ -500,6 +501,7 @@ struct hnae3_tc_info {
struct hnae3_knic_private_info {
struct net_device *netdev; /* Set by KNIC client when init instance */
u16 rss_size; /* Allocated RSS queues */
+ u16 req_rss_size;
u16 rx_buf_len;
u16 num_desc;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 1bf7a5f116a0..4b38c37eef8f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -348,6 +348,8 @@ static int hns3_nic_net_up(struct net_device *netdev)
return ret;
}
+ clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+
/* enable the vectors */
for (i = 0; i < priv->vector_num; i++)
hns3_vector_enable(&priv->tqp_vector[i]);
@@ -361,11 +363,10 @@ static int hns3_nic_net_up(struct net_device *netdev)
if (ret)
goto out_start_err;
- clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
-
return 0;
out_start_err:
+ set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
while (j--)
hns3_tqp_disable(h->kinfo.tqp[j]);
@@ -506,7 +507,7 @@ static u8 hns3_get_netdev_flags(struct net_device *netdev)
u8 flags = 0;
if (netdev->flags & IFF_PROMISC) {
- flags = HNAE3_USER_UPE | HNAE3_USER_MPE;
+ flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
} else {
flags |= HNAE3_VLAN_FLTR;
if (netdev->flags & IFF_ALLMULTI)
@@ -541,13 +542,13 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
}
}
- hns3_update_promisc_mode(netdev, new_flags);
/* User mode Promisc mode enable and vlan filtering is disabled to
* let all packets in. MAC-VLAN Table overflow Promisc enabled and
* vlan filtering is enabled
*/
hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
h->netdev_flags = new_flags;
+ hns3_update_promisc_mode(netdev, new_flags);
}
int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
@@ -1133,6 +1134,7 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
struct hns3_enet_ring *ring)
{
struct sk_buff *skb = *out_skb;
+ struct sk_buff *new_skb = NULL;
struct skb_frag_struct *frag;
int bdnum_for_frag;
int frag_num;
@@ -1155,7 +1157,19 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
buf_num += bdnum_for_frag;
}
- if (buf_num > ring_space(ring))
+ if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
+ buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
+ if (ring_space(ring) < buf_num)
+ return -EBUSY;
+ /* manually split the send packet */
+ new_skb = skb_copy(skb, GFP_ATOMIC);
+ if (!new_skb)
+ return -ENOMEM;
+ dev_kfree_skb_any(skb);
+ *out_skb = new_skb;
+ }
+
+ if (unlikely(ring_space(ring) < buf_num))
return -EBUSY;
*bnum = buf_num;
@@ -1166,11 +1180,24 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
struct hns3_enet_ring *ring)
{
struct sk_buff *skb = *out_skb;
+ struct sk_buff *new_skb = NULL;
int buf_num;
/* No. of segments (plus a header) */
buf_num = skb_shinfo(skb)->nr_frags + 1;
+ if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
+ buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
+ if (ring_space(ring) < buf_num)
+ return -EBUSY;
+ /* manually split the send packet */
+ new_skb = skb_copy(skb, GFP_ATOMIC);
+ if (!new_skb)
+ return -ENOMEM;
+ dev_kfree_skb_any(skb);
+ *out_skb = new_skb;
+ }
+
if (unlikely(ring_space(ring) < buf_num))
return -EBUSY;
@@ -1399,7 +1426,12 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
int queue_num = priv->ae_handle->kinfo.num_tqps;
struct hnae3_handle *handle = priv->ae_handle;
struct hns3_enet_ring *ring;
+ u64 rx_length_errors = 0;
+ u64 rx_crc_errors = 0;
+ u64 rx_multicast = 0;
unsigned int start;
+ u64 tx_errors = 0;
+ u64 rx_errors = 0;
unsigned int idx;
u64 tx_bytes = 0;
u64 rx_bytes = 0;
@@ -1420,8 +1452,8 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
start = u64_stats_fetch_begin_irq(&ring->syncp);
tx_bytes += ring->stats.tx_bytes;
tx_pkts += ring->stats.tx_pkts;
- tx_drop += ring->stats.tx_busy;
tx_drop += ring->stats.sw_err_cnt;
+ tx_errors += ring->stats.sw_err_cnt;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
@@ -1431,8 +1463,13 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
rx_bytes += ring->stats.rx_bytes;
rx_pkts += ring->stats.rx_pkts;
rx_drop += ring->stats.non_vld_descs;
- rx_drop += ring->stats.err_pkt_len;
rx_drop += ring->stats.l2_err;
+ rx_errors += ring->stats.non_vld_descs;
+ rx_errors += ring->stats.l2_err;
+ rx_crc_errors += ring->stats.l2_err;
+ rx_crc_errors += ring->stats.l3l4_csum_err;
+ rx_multicast += ring->stats.rx_multicast;
+ rx_length_errors += ring->stats.err_pkt_len;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}
@@ -1441,15 +1478,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
stats->rx_bytes = rx_bytes;
stats->rx_packets = rx_pkts;
- stats->rx_errors = netdev->stats.rx_errors;
- stats->multicast = netdev->stats.multicast;
- stats->rx_length_errors = netdev->stats.rx_length_errors;
- stats->rx_crc_errors = netdev->stats.rx_crc_errors;
+ stats->rx_errors = rx_errors;
+ stats->multicast = rx_multicast;
+ stats->rx_length_errors = rx_length_errors;
+ stats->rx_crc_errors = rx_crc_errors;
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
- stats->tx_errors = netdev->stats.tx_errors;
- stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
- stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
+ stats->tx_errors = tx_errors;
+ stats->rx_dropped = rx_drop;
+ stats->tx_dropped = tx_drop;
stats->collisions = netdev->stats.collisions;
stats->rx_over_errors = netdev->stats.rx_over_errors;
stats->rx_frame_errors = netdev->stats.rx_frame_errors;
@@ -1472,8 +1509,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
u8 tc = mqprio_qopt->qopt.num_tc;
u16 mode = mqprio_qopt->mode;
u8 hw = mqprio_qopt->qopt.hw;
- bool if_running;
- int ret;
if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
@@ -1485,24 +1520,8 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
if (!netdev)
return -EINVAL;
- if_running = netif_running(netdev);
- if (if_running) {
- hns3_nic_net_stop(netdev);
- msleep(100);
- }
-
- ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
+ return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
- if (ret)
- goto out;
-
- ret = hns3_nic_set_real_num_queue(netdev);
-
-out:
- if (if_running)
- hns3_nic_net_open(netdev);
-
- return ret;
}
static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
@@ -1771,6 +1790,7 @@ static void hns3_remove(struct pci_dev *pdev)
hns3_disable_sriov(pdev);
hnae3_unregister_ae_dev(ae_dev);
+ pci_set_drvdata(pdev, NULL);
}
/**
@@ -2572,6 +2592,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
struct sk_buff **out_skb)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ enum hns3_pkt_l2t_type l2_frame_type;
struct sk_buff *skb = ring->skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *desc;
@@ -2680,7 +2701,12 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
return -EFAULT;
}
+ l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
+ HNS3_RXD_DMAC_S);
u64_stats_update_begin(&ring->syncp);
+ if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
+ ring->stats.rx_multicast++;
+
ring->stats.rx_pkts++;
ring->stats.rx_bytes += skb->len;
u64_stats_update_end(&ring->syncp);
@@ -3185,6 +3211,9 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
for (i = 0; i < priv->vector_num; i++) {
tqp_vector = &priv->tqp_vector[i];
+ if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
+ continue;
+
ret = hns3_get_vector_ring_chain(tqp_vector,
&vector_ring_chain);
if (ret)
@@ -3205,7 +3234,6 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
}
- priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
hns3_clear_ring_group(&tqp_vector->rx_group);
hns3_clear_ring_group(&tqp_vector->tx_group);
netif_napi_del(&priv->tqp_vector[i].napi);
@@ -3238,6 +3266,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
{
struct hns3_nic_ring_data *ring_data = priv->ring_data;
int queue_num = priv->ae_handle->kinfo.num_tqps;
+ int desc_num = priv->ae_handle->kinfo.num_desc;
struct pci_dev *pdev = priv->ae_handle->pdev;
struct hns3_enet_ring *ring;
@@ -3263,7 +3292,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring->dev = priv->dev;
ring->desc_dma_addr = 0;
ring->buf_size = q->buf_size;
- ring->desc_num = q->desc_num;
+ ring->desc_num = desc_num;
ring->next_to_use = 0;
ring->next_to_clean = 0;
@@ -3375,6 +3404,11 @@ static void hns3_fini_ring(struct hns3_enet_ring *ring)
ring->desc_cb = NULL;
ring->next_to_clean = 0;
ring->next_to_use = 0;
+ ring->pending_buf = 0;
+ if (ring->skb) {
+ dev_kfree_skb_any(ring->skb);
+ ring->skb = NULL;
+ }
}
static int hns3_buf_size2type(u32 buf_size)
@@ -3581,6 +3615,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->netdev = netdev;
priv->ae_handle = handle;
priv->tx_timeout_count = 0;
+ set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
handle->kinfo.netdev = netdev;
handle->priv = (void *)priv;
@@ -3725,8 +3760,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct net_device *ndev = kinfo->netdev;
- bool if_running;
- int ret;
if (tc > HNAE3_MAX_TC)
return -EINVAL;
@@ -3734,25 +3767,7 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
if (!ndev)
return -ENODEV;
- if_running = netif_running(ndev);
-
- if (if_running) {
- (void)hns3_nic_net_stop(ndev);
- msleep(100);
- }
-
- ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
- kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
- if (ret)
- goto err_out;
-
- ret = hns3_nic_set_real_num_queue(ndev);
-
-err_out:
- if (if_running)
- (void)hns3_nic_net_open(ndev);
-
- return ret;
+ return hns3_nic_set_real_num_queue(ndev);
}
static int hns3_recover_hw_addr(struct net_device *ndev)
@@ -4013,41 +4028,18 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
- bool vlan_filter_enable;
int ret;
- ret = hns3_init_mac_addr(netdev, false);
- if (ret)
- return ret;
-
- ret = hns3_recover_hw_addr(netdev);
- if (ret)
- return ret;
-
- ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
- if (ret)
- return ret;
-
- vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
- hns3_enable_vlan_filter(netdev, vlan_filter_enable);
-
- /* Hardware table is only clear when pf resets */
- if (!(handle->flags & HNAE3_SUPPORT_VF)) {
- ret = hns3_restore_vlan(netdev);
- if (ret)
- return ret;
- }
+ /* Carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
- ret = hns3_restore_fd_rules(netdev);
+ ret = hns3_get_ring_config(priv);
if (ret)
return ret;
- /* Carrier off reporting is important to ethtool even BEFORE open */
- netif_carrier_off(netdev);
-
ret = hns3_nic_alloc_vector_data(priv);
if (ret)
- return ret;
+ goto err_put_ring;
hns3_restore_coal(priv);
@@ -4068,10 +4060,44 @@ err_uninit_vector:
priv->ring_data = NULL;
err_dealloc_vector:
hns3_nic_dealloc_vector_data(priv);
+err_put_ring:
+ hns3_put_ring_config(priv);
+ priv->ring_data = NULL;
return ret;
}
+static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
+{
+ struct net_device *netdev = handle->kinfo.netdev;
+ bool vlan_filter_enable;
+ int ret;
+
+ ret = hns3_init_mac_addr(netdev, false);
+ if (ret)
+ return ret;
+
+ ret = hns3_recover_hw_addr(netdev);
+ if (ret)
+ return ret;
+
+ ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
+ if (ret)
+ return ret;
+
+ vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
+ hns3_enable_vlan_filter(netdev, vlan_filter_enable);
+
+ /* Hardware table is only cleared when the pf resets */
+ if (!(handle->flags & HNAE3_SUPPORT_VF)) {
+ ret = hns3_restore_vlan(netdev);
+ if (ret)
+ return ret;
+ }
+
+ return hns3_restore_fd_rules(netdev);
+}
+
static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
struct net_device *netdev = handle->kinfo.netdev;
@@ -4101,6 +4127,9 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
if (ret)
netdev_err(netdev, "uninit ring error\n");
+ hns3_put_ring_config(priv);
+ priv->ring_data = NULL;
+
clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
@@ -4124,6 +4153,9 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
case HNAE3_UNINIT_CLIENT:
ret = hns3_reset_notify_uninit_enet(handle);
break;
+ case HNAE3_RESTORE_CLIENT:
+ ret = hns3_reset_notify_restore_enet(handle);
+ break;
default:
break;
}
@@ -4131,57 +4163,12 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
return ret;
}
-static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = hns3_get_handle(netdev);
- int ret;
-
- ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
- if (ret)
- return ret;
-
- ret = hns3_get_ring_config(priv);
- if (ret)
- return ret;
-
- ret = hns3_nic_alloc_vector_data(priv);
- if (ret)
- goto err_alloc_vector;
-
- hns3_restore_coal(priv);
-
- ret = hns3_nic_init_vector_data(priv);
- if (ret)
- goto err_uninit_vector;
-
- ret = hns3_init_all_ring(priv);
- if (ret)
- goto err_put_ring;
-
- return 0;
-
-err_put_ring:
- hns3_put_ring_config(priv);
-err_uninit_vector:
- hns3_nic_uninit_vector_data(priv);
-err_alloc_vector:
- hns3_nic_dealloc_vector_data(priv);
- return ret;
-}
-
-static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
-{
- return (new_tqp_num / num_tc) * num_tc;
-}
-
int hns3_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo = &h->kinfo;
- bool if_running = netif_running(netdev);
+ bool rxfh_configured = netif_is_rxfh_configured(netdev);
u32 new_tqp_num = ch->combined_count;
u16 org_tqp_num;
int ret;
@@ -4190,39 +4177,29 @@ int hns3_set_channels(struct net_device *netdev,
return -EINVAL;
if (new_tqp_num > hns3_get_max_available_channels(h) ||
- new_tqp_num < kinfo->num_tc) {
+ new_tqp_num < 1) {
dev_err(&netdev->dev,
- "Change tqps fail, the tqp range is from %d to %d",
- kinfo->num_tc,
+ "Change tqps fail, the tqp range is from 1 to %d",
hns3_get_max_available_channels(h));
return -EINVAL;
}
- new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
- if (kinfo->num_tqps == new_tqp_num)
+ if (kinfo->rss_size == new_tqp_num)
return 0;
- if (if_running)
- hns3_nic_net_stop(netdev);
-
- ret = hns3_nic_uninit_vector_data(priv);
- if (ret) {
- dev_err(&netdev->dev,
- "Unbind vector with tqp fail, nothing is changed");
- goto open_netdev;
- }
-
- hns3_store_coal(priv);
-
- hns3_nic_dealloc_vector_data(priv);
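+ /* reuse the reset notification path to tear the client down and
+ * rebuild it around the queue number change
+ */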
+ ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
- hns3_uninit_all_ring(priv);
- hns3_put_ring_config(priv);
+ ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
org_tqp_num = h->kinfo.num_tqps;
- ret = hns3_modify_tqp_num(netdev, new_tqp_num);
+ ret = h->ae_algo->ops->set_channels(h, new_tqp_num, rxfh_configured);
if (ret) {
- ret = hns3_modify_tqp_num(netdev, org_tqp_num);
+ ret = h->ae_algo->ops->set_channels(h, org_tqp_num,
+ rxfh_configured);
if (ret) {
/* If revert to old tqp failed, fatal error occurred */
dev_err(&netdev->dev,
@@ -4232,12 +4209,11 @@ int hns3_set_channels(struct net_device *netdev,
dev_info(&netdev->dev,
"Change tqp num fail, Revert to old tqp num");
}
+ ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
-open_netdev:
- if (if_running)
- hns3_nic_net_open(netdev);
-
- return ret;
+ return hns3_reset_notify(h, HNAE3_UP_CLIENT);
}
static const struct hnae3_client_ops client_ops = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index e55995e93bb0..f3d248626ab3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -202,6 +202,13 @@ enum hns3_nic_state {
#define HNS3_RING_EN_B 0
+enum hns3_pkt_l2t_type {
+ HNS3_L2_TYPE_UNICAST,
+ HNS3_L2_TYPE_MULTICAST,
+ HNS3_L2_TYPE_BROADCAST,
+ HNS3_L2_TYPE_INVALID,
+};
+
enum hns3_pkt_l3t_type {
HNS3_L3T_NONE,
HNS3_L3T_IPV6,
@@ -376,6 +383,7 @@ struct ring_stats {
u64 err_bd_num;
u64 l2_err;
u64 l3l4_csum_err;
+ u64 rx_multicast;
};
};
};
@@ -412,7 +420,6 @@ struct hns3_enet_ring {
unsigned char *va; /* first buffer address for current packet */
u32 flag; /* ring attribute */
- int irq_init_flag;
int numa_node;
cpumask_t affinity_mask;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index e678b6939da3..ed73f7fc9171 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -47,6 +47,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
HNS3_TQP_STAT("err_bd_num", err_bd_num),
HNS3_TQP_STAT("l2_err", l2_err),
HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
+ HNS3_TQP_STAT("multicast", rx_multicast),
};
#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
@@ -116,7 +117,7 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
ret = hns3_lp_setup(ndev, loop_mode, true);
usleep_range(10000, 20000);
- return 0;
+ return ret;
}
static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
@@ -333,10 +334,10 @@ static void hns3_self_test(struct net_device *ndev,
continue;
data[test_index] = hns3_lp_up(ndev, loop_type);
- if (!data[test_index]) {
+ if (!data[test_index])
data[test_index] = hns3_lp_run_test(ndev, loop_type);
- hns3_lp_down(ndev, loop_type);
- }
+
+ hns3_lp_down(ndev, loop_type);
if (data[test_index])
eth_test->flags |= ETH_TEST_FL_FAILED;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index e483a6e730e6..81dbe1b6abb0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -170,8 +170,12 @@ static bool hclge_is_special_opcode(u16 opcode)
/* these commands have several descriptors,
* and use the first one to save opcode and return value
*/
- u16 spec_opcode[3] = {HCLGE_OPC_STATS_64_BIT,
- HCLGE_OPC_STATS_32_BIT, HCLGE_OPC_STATS_MAC};
+ u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT,
+ HCLGE_OPC_STATS_32_BIT,
+ HCLGE_OPC_STATS_MAC,
+ HCLGE_OPC_STATS_MAC_ALL,
+ HCLGE_OPC_QUERY_32_BIT_REG,
+ HCLGE_OPC_QUERY_64_BIT_REG};
int i;
for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
@@ -259,6 +263,10 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
retval = 0;
+ else if (desc_ret == HCLGE_CMD_NO_AUTH)
+ retval = -EPERM;
+ else if (desc_ret == HCLGE_CMD_NOT_SUPPORTED)
+ retval = -EOPNOTSUPP;
else
retval = -EIO;
hw->cmq.last_status = desc_ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index f23042b24c09..e26a25128693 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -39,7 +39,7 @@ struct hclge_cmq_ring {
enum hclge_cmd_return_status {
HCLGE_CMD_EXEC_SUCCESS = 0,
HCLGE_CMD_NO_AUTH = 1,
- HCLGE_CMD_NOT_EXEC = 2,
+ HCLGE_CMD_NOT_SUPPORTED = 2,
HCLGE_CMD_QUEUE_FULL = 3,
};
@@ -82,6 +82,8 @@ enum hclge_opcode_type {
HCLGE_OPC_STATS_64_BIT = 0x0030,
HCLGE_OPC_STATS_32_BIT = 0x0031,
HCLGE_OPC_STATS_MAC = 0x0032,
+ HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033,
+ HCLGE_OPC_STATS_MAC_ALL = 0x0034,
HCLGE_OPC_QUERY_REG_NUM = 0x0040,
HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index f6323b2501dc..961aedb0e20f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -156,21 +156,15 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
return 0;
}
-static int hclge_map_update(struct hnae3_handle *h)
+static int hclge_map_update(struct hclge_dev *hdev)
{
- struct hclge_vport *vport = hclge_get_vport(h);
- struct hclge_dev *hdev = vport->back;
int ret;
- ret = hclge_tm_map_cfg(hdev);
- if (ret)
- return ret;
-
- ret = hclge_tm_schd_mode_hw(hdev);
+ ret = hclge_tm_schd_setup_hw(hdev);
if (ret)
return ret;
- ret = hclge_pause_setup_hw(hdev);
+ ret = hclge_pause_setup_hw(hdev, false);
if (ret)
return ret;
@@ -222,19 +216,51 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
if (ret)
return ret;
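+ /* the TC map is changing: quiesce and uninitialize the client first,
+ * it is re-initialized below once the new map has been programmed
+ */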
+ if (map_changed) {
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+ }
+
hclge_tm_schd_info_update(hdev, num_tc);
ret = hclge_ieee_ets_to_tm_info(hdev, ets);
if (ret)
- return ret;
+ goto err_out;
if (map_changed) {
+ ret = hclge_map_update(hdev);
+ if (ret)
+ goto err_out;
+
ret = hclge_client_setup_tc(hdev);
if (ret)
+ goto err_out;
+
+ ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
return ret;
}
return hclge_tm_dwrr_cfg(hdev);
+
+err_out:
+ if (!map_changed)
+ return ret;
+
+ if (hclge_notify_client(hdev, HNAE3_INIT_CLIENT))
+ return ret;
+
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ return ret;
}
static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
@@ -283,6 +309,9 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
return -EINVAL;
+ if (pfc->pfc_en == hdev->tm_info.pfc_en)
+ return 0;
+
prio_tc = hdev->tm_info.prio_tc;
pfc_map = 0;
@@ -295,12 +324,10 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
}
}
- if (pfc_map == hdev->tm_info.hw_pfc_map)
- return 0;
-
hdev->tm_info.hw_pfc_map = pfc_map;
+ hdev->tm_info.pfc_en = pfc->pfc_en;
- return hclge_pause_setup_hw(hdev);
+ return hclge_pause_setup_hw(hdev, false);
}
/* DCBX configuration */
@@ -345,12 +372,24 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
if (ret)
return -EINVAL;
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+
hclge_tm_schd_info_update(hdev, tc);
hclge_tm_prio_tc_info_update(hdev, prio_tc);
- ret = hclge_tm_init_hw(hdev);
+ ret = hclge_tm_init_hw(hdev, false);
if (ret)
- return ret;
+ goto err_out;
+
+ ret = hclge_client_setup_tc(hdev);
+ if (ret)
+ goto err_out;
hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
@@ -359,7 +398,18 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
else
hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;
- return 0;
+ ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+
+err_out:
+ if (hclge_notify_client(hdev, HNAE3_INIT_CLIENT))
+ return ret;
+
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ return ret;
}
static const struct hnae3_dcb_ops hns3_dcb_ops = {
@@ -369,7 +419,6 @@ static const struct hnae3_dcb_ops hns3_dcb_ops = {
.ieee_setpfc = hclge_ieee_setpfc,
.getdcbx = hclge_getdcbx,
.setdcbx = hclge_setdcbx,
- .map_update = hclge_map_update,
.setup_tc = hclge_setup_tc,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index f7637c08bb3a..ae8336c18264 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -118,6 +118,12 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
{"mac_rx_mac_pause_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
+ {"mac_tx_control_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
+ {"mac_rx_control_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
+ {"mac_tx_pfc_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
{"mac_tx_pfc_pri0_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
{"mac_tx_pfc_pri1_pkt_num",
@@ -134,6 +140,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
{"mac_tx_pfc_pri7_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
+ {"mac_rx_pfc_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
{"mac_rx_pfc_pri0_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
{"mac_rx_pfc_pri1_pkt_num",
@@ -287,10 +295,9 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
},
};
-static int hclge_mac_update_stats(struct hclge_dev *hdev)
+static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
-#define HCLGE_RTN_DATA_NUM 4
u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
@@ -308,22 +315,102 @@ static int hclge_mac_update_stats(struct hclge_dev *hdev)
}
for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
+ /* for special opcode 0032, only the first desc has the head */
if (unlikely(i == 0)) {
desc_data = (__le64 *)(&desc[i].data[0]);
- n = HCLGE_RTN_DATA_NUM - 2;
+ n = HCLGE_RD_FIRST_STATS_NUM;
+ } else {
+ desc_data = (__le64 *)(&desc[i]);
+ n = HCLGE_RD_OTHER_STATS_NUM;
+ }
+
+ for (k = 0; k < n; k++) {
+ *data += le64_to_cpu(*desc_data);
+ data++;
+ desc_data++;
+ }
+ }
+
+ return 0;
+}
+
+static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
+{
+ u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
+ struct hclge_desc *desc;
+ __le64 *desc_data;
+ u16 i, k, n;
+ int ret;
+
+ desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
+ if (ret) {
+ kfree(desc);
+ return ret;
+ }
+
+ for (i = 0; i < desc_num; i++) {
+ /* for special opcode 0034, only the first desc has the head */
+ if (i == 0) {
+ desc_data = (__le64 *)(&desc[i].data[0]);
+ n = HCLGE_RD_FIRST_STATS_NUM;
} else {
desc_data = (__le64 *)(&desc[i]);
- n = HCLGE_RTN_DATA_NUM;
+ n = HCLGE_RD_OTHER_STATS_NUM;
}
+
for (k = 0; k < n; k++) {
- *data++ += le64_to_cpu(*desc_data);
+ *data += le64_to_cpu(*desc_data);
+ data++;
desc_data++;
}
}
+ kfree(desc);
+
+ return 0;
+}
+
+static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
+{
+ struct hclge_desc desc;
+ __le32 *desc_data;
+ u32 reg_num;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ desc_data = (__le32 *)(&desc.data[0]);
+ reg_num = le32_to_cpu(*desc_data);
+
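+ /* 1 head desc plus ceil((reg_num - 3) / 4) descs holding four
+ * 64-bit stats each
+ */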
+ *desc_num = 1 + ((reg_num - 3) >> 2) +
+ (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
+
return 0;
}
+static int hclge_mac_update_stats(struct hclge_dev *hdev)
+{
+ u32 desc_num;
+ int ret;
+
+ ret = hclge_mac_query_reg_num(hdev, &desc_num);
+
+ /* The firmware supports the new statistics acquisition method */
+ if (!ret)
+ ret = hclge_mac_update_stats_complete(hdev, desc_num);
+ else if (ret == -EOPNOTSUPP)
+ ret = hclge_mac_update_stats_defective(hdev);
+ else
+ dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
+
+ return ret;
+}
+
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -461,26 +548,6 @@ static u8 *hclge_comm_get_strings(u32 stringset,
return (u8 *)buff;
}
-static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
- struct net_device_stats *net_stats)
-{
- net_stats->tx_dropped = 0;
- net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
- net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
- net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
-
- net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
- net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
-
- net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
- net_stats->rx_length_errors =
- hw_stats->mac_stats.mac_rx_undersize_pkt_num;
- net_stats->rx_length_errors +=
- hw_stats->mac_stats.mac_rx_oversize_pkt_num;
- net_stats->rx_over_errors =
- hw_stats->mac_stats.mac_rx_oversize_pkt_num;
-}
-
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
struct hnae3_handle *handle;
@@ -500,8 +567,6 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev)
if (status)
dev_err(&hdev->pdev->dev,
"Update MAC stats fail, status = %d.\n", status);
-
- hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}
static void hclge_update_stats(struct hnae3_handle *handle,
@@ -509,7 +574,6 @@ static void hclge_update_stats(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
int status;
if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
@@ -527,8 +591,6 @@ static void hclge_update_stats(struct hnae3_handle *handle,
"Update TQPS stats fail, status = %d.\n",
status);
- hclge_update_netstat(hw_stats, net_stats);
-
clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
@@ -1068,14 +1130,14 @@ static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
return ret;
}
-static int hclge_assign_tqp(struct hclge_vport *vport)
+static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_dev *hdev = vport->back;
int i, alloced;
for (i = 0, alloced = 0; i < hdev->num_tqps &&
- alloced < kinfo->num_tqps; i++) {
+ alloced < num_tqps; i++) {
if (!hdev->htqp[i].alloced) {
hdev->htqp[i].q.handle = &vport->nic;
hdev->htqp[i].q.tqp_index = alloced;
@@ -1085,7 +1147,9 @@ static int hclge_assign_tqp(struct hclge_vport *vport)
alloced++;
}
}
- vport->alloc_tqps = kinfo->num_tqps;
+ vport->alloc_tqps = alloced;
+ kinfo->rss_size = min_t(u16, hdev->rss_size_max,
+ vport->alloc_tqps / hdev->tm_info.num_tc);
return 0;
}
@@ -1096,36 +1160,17 @@ static int hclge_knic_setup(struct hclge_vport *vport,
struct hnae3_handle *nic = &vport->nic;
struct hnae3_knic_private_info *kinfo = &nic->kinfo;
struct hclge_dev *hdev = vport->back;
- int i, ret;
+ int ret;
kinfo->num_desc = num_desc;
kinfo->rx_buf_len = hdev->rx_buf_len;
- kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
- kinfo->rss_size
- = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
- kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;
-
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- if (hdev->hw_tc_map & BIT(i)) {
- kinfo->tc_info[i].enable = true;
- kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
- kinfo->tc_info[i].tqp_count = kinfo->rss_size;
- kinfo->tc_info[i].tc = i;
- } else {
- /* Set to default queue if TC is disable */
- kinfo->tc_info[i].enable = false;
- kinfo->tc_info[i].tqp_offset = 0;
- kinfo->tc_info[i].tqp_count = 1;
- kinfo->tc_info[i].tc = 0;
- }
- }
- kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
+ kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
sizeof(struct hnae3_queue *), GFP_KERNEL);
if (!kinfo->tqp)
return -ENOMEM;
- ret = hclge_assign_tqp(vport);
+ ret = hclge_assign_tqp(vport, num_tqps);
if (ret)
dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
@@ -1140,7 +1185,7 @@ static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
u16 i;
kinfo = &nic->kinfo;
- for (i = 0; i < kinfo->num_tqps; i++) {
+ for (i = 0; i < vport->alloc_tqps; i++) {
struct hclge_tqp *q =
container_of(kinfo->tqp[i], struct hclge_tqp, q);
bool is_pf;
@@ -2122,7 +2167,9 @@ static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
static void hclge_update_link_status(struct hclge_dev *hdev)
{
+ struct hnae3_client *rclient = hdev->roce_client;
struct hnae3_client *client = hdev->nic_client;
+ struct hnae3_handle *rhandle;
struct hnae3_handle *handle;
int state;
int i;
@@ -2134,6 +2181,10 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
handle = &hdev->vport[i].nic;
client->ops->link_status_change(handle, state);
+ rhandle = &hdev->vport[i].roce;
+ if (rclient && rclient->ops->link_status_change)
+ rclient->ops->link_status_change(rhandle,
+ state);
}
hdev->hw.mac.link = state;
}
@@ -2418,8 +2469,8 @@ static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
hclge_free_vector(hdev, 0);
}
-static int hclge_notify_client(struct hclge_dev *hdev,
- enum hnae3_reset_notify_type type)
+int hclge_notify_client(struct hclge_dev *hdev,
+ enum hnae3_reset_notify_type type)
{
struct hnae3_client *client = hdev->nic_client;
u16 i;
@@ -2883,6 +2934,10 @@ static void hclge_reset(struct hclge_dev *hdev)
if (ret)
goto err_reset_lock;
+ ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
hclge_clear_reset_cause(hdev);
ret = hclge_reset_prepare_up(hdev);
@@ -3788,8 +3843,16 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct hclge_promisc_param param;
+ bool en_bc_pmc = true;
+
+ /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
+ * is always bypassed. So broadcast promisc should stay disabled until
+ * the user enables promisc mode
+ */
+ if (handle->pdev->revision == 0x20)
+ en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
- hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
+ hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
vport->vport_id);
return hclge_cmd_set_promisc_mode(hdev, &param);
}
@@ -5174,8 +5237,15 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
{
#define HCLGE_SERDES_RETRY_MS 10
#define HCLGE_SERDES_RETRY_NUM 100
+
+#define HCLGE_MAC_LINK_STATUS_MS 20
+#define HCLGE_MAC_LINK_STATUS_NUM 10
+#define HCLGE_MAC_LINK_STATUS_DOWN 0
+#define HCLGE_MAC_LINK_STATUS_UP 1
+
struct hclge_serdes_lb_cmd *req;
struct hclge_desc desc;
+ int mac_link_ret = 0;
int ret, i = 0;
u8 loop_mode_b;
@@ -5198,8 +5268,10 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
if (en) {
req->enable = loop_mode_b;
req->mask = loop_mode_b;
+ mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
} else {
req->mask = loop_mode_b;
+ mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -5231,7 +5303,19 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
}
hclge_cfg_mac_mode(hdev, en);
- return 0;
+
+ i = 0;
+ do {
+ /* serdes internal loopback, independent of the network cable */
+ msleep(HCLGE_MAC_LINK_STATUS_MS);
+ ret = hclge_get_mac_link_status(hdev);
+ if (ret == mac_link_ret)
+ return 0;
+ } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
+
+ dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
+
+ return -EBUSY;
}
static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
@@ -5258,6 +5342,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
enum hnae3_loop loop_mode, bool en)
{
struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hnae3_knic_private_info *kinfo;
struct hclge_dev *hdev = vport->back;
int i, ret;
@@ -5276,7 +5361,11 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
break;
}
- for (i = 0; i < vport->alloc_tqps; i++) {
+ if (ret)
+ return ret;
+
+ kinfo = &vport->nic.kinfo;
+ for (i = 0; i < kinfo->num_tqps; i++) {
ret = hclge_tqp_enable(hdev, i, 0, en);
if (ret)
return ret;
@@ -5288,11 +5377,13 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hnae3_knic_private_info *kinfo;
struct hnae3_queue *queue;
struct hclge_tqp *tqp;
int i;
- for (i = 0; i < vport->alloc_tqps; i++) {
+ kinfo = &vport->nic.kinfo;
+ for (i = 0; i < kinfo->num_tqps; i++) {
queue = handle->kinfo.tqp[i];
tqp = container_of(queue, struct hclge_tqp, q);
memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
@@ -7456,7 +7547,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- ret = hclge_tm_init_hw(hdev);
+ ret = hclge_tm_init_hw(hdev, true);
if (ret) {
dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
return ret;
@@ -7523,18 +7614,17 @@ static u32 hclge_get_max_channels(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+ return min_t(u32, hdev->rss_size_max,
+ vport->alloc_tqps / kinfo->num_tc);
}
static void hclge_get_channels(struct hnae3_handle *handle,
struct ethtool_channels *ch)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
-
ch->max_combined = hclge_get_max_channels(handle);
ch->other_count = 1;
ch->max_other = 1;
- ch->combined_count = vport->alloc_tqps;
+ ch->combined_count = handle->kinfo.rss_size;
}
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
@@ -7547,26 +7637,8 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
*max_rss_size = hdev->rss_size_max;
}
-static void hclge_release_tqp(struct hclge_vport *vport)
-{
- struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
- struct hclge_dev *hdev = vport->back;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- struct hclge_tqp *tqp =
- container_of(kinfo->tqp[i], struct hclge_tqp, q);
-
- tqp->q.handle = NULL;
- tqp->q.tqp_index = 0;
- tqp->alloced = false;
- }
-
- devm_kfree(&hdev->pdev->dev, kinfo->tqp);
- kinfo->tqp = NULL;
-}
-
-static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
+static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
+ bool rxfh_configured)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
@@ -7580,24 +7652,11 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
u32 *rss_indir;
int ret, i;
- /* Free old tqps, and reallocate with new tqp number when nic setup */
- hclge_release_tqp(vport);
-
- ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
- if (ret) {
- dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
- return ret;
- }
-
- ret = hclge_map_tqp_to_vport(hdev, vport);
- if (ret) {
- dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
- return ret;
- }
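+ /* only record the request here; hclge_tm_vport_map_update() validates
+ * and applies it when rebuilding the vport TC info
+ */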
+ kinfo->req_rss_size = new_tqps_num;
- ret = hclge_tm_schd_init(hdev);
+ ret = hclge_tm_vport_map_update(hdev);
if (ret) {
- dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
+ dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
return ret;
}
@@ -7618,6 +7677,10 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
if (ret)
return ret;
+ /* RSS indirection table has been configured by the user */
+ if (rxfh_configured)
+ goto out;
+
/* Reinitializes the rss indirect table according to the new RSS size */
rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
if (!rss_indir)
@@ -7633,6 +7696,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
kfree(rss_indir);
+out:
if (!ret)
dev_info(&hdev->pdev->dev,
"Channels changed, rss_size from %d to %d, tqps from %d to %d",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 6615b85a1c52..2c413c63c6c9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -16,6 +16,9 @@
#define HCLGE_MAX_PF_NUM 8
+#define HCLGE_RD_FIRST_STATS_NUM 2
+#define HCLGE_RD_OTHER_STATS_NUM 4
+
#define HCLGE_INVALID_VPORT 0xffff
#define HCLGE_PF_CFG_BLOCK_SIZE 32
@@ -322,6 +325,7 @@ struct hclge_tm_info {
struct hclge_tc_info tc_info[HNAE3_MAX_TC];
enum hclge_fc_mode fc_mode;
u8 hw_pfc_map; /* Allow for packet drop or not on this TC */
+ u8 pfc_en; /* PFC enabled or not for user priority */
};
struct hclge_comm_stats_str {
@@ -415,6 +419,10 @@ struct hclge_mac_stats {
u64 mac_rx_fcs_err_pkt_num;
u64 mac_rx_send_app_good_pkt_num;
u64 mac_rx_send_app_bad_pkt_num;
+ u64 mac_tx_pfc_pause_pkt_num;
+ u64 mac_rx_pfc_pause_pkt_num;
+ u64 mac_tx_ctrl_pkt_num;
+ u64 mac_rx_ctrl_pkt_num;
};
#define HCLGE_STATS_TIMER_INTERVAL (60 * 5)
@@ -878,4 +886,6 @@ void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
+int hclge_notify_client(struct hclge_dev *hdev,
+ enum hnae3_reset_notify_type type);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index a1de451a85df..3603034aa45c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -203,12 +203,11 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *req)
{
- bool en_uc = req->msg[1] ? true : false;
- bool en_mc = req->msg[2] ? true : false;
+ bool en_bc = req->msg[1] ? true : false;
struct hclge_promisc_param param;
- /* always enable broadcast promisc bit */
- hclge_promisc_param_init(&param, en_uc, en_mc, true, vport->vport_id);
+ /* vf is not allowed to enable unicast/multicast promisc mode */
+ hclge_promisc_param_init(&param, false, false, en_bc, vport->vport_id);
return hclge_cmd_set_promisc_mode(vport->back, &param);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 00458da67503..9f4069fb786b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -517,19 +517,32 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_dev *hdev = vport->back;
+ u16 max_rss_size;
u8 i;
vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
- kinfo->num_tc =
- min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc);
- kinfo->rss_size
- = min_t(u16, hdev->rss_size_max,
- kinfo->num_tqps / kinfo->num_tc);
+ kinfo->num_tc = min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
+ max_rss_size = min_t(u16, hdev->rss_size_max,
+ vport->alloc_tqps / kinfo->num_tc);
+
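+ /* prefer a valid user-requested rss_size; otherwise fall back to the
+ * largest size the allocated tqps can support
+ */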
+ if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
+ kinfo->req_rss_size <= max_rss_size) {
+ dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
+ kinfo->rss_size, kinfo->req_rss_size);
+ kinfo->rss_size = kinfo->req_rss_size;
+ } else if (kinfo->rss_size > max_rss_size ||
+ (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
+ dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
+ kinfo->rss_size, max_rss_size);
+ kinfo->rss_size = max_rss_size;
+ }
+
+ kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
vport->dwrr = 100; /* 100 percent as init */
vport->alloc_rss_size = kinfo->rss_size;
- for (i = 0; i < kinfo->num_tc; i++) {
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
if (hdev->hw_tc_map & BIT(i)) {
kinfo->tc_info[i].enable = true;
kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
@@ -934,6 +947,36 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
return 0;
}
+static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
+{
+#define DEFAULT_TC_WEIGHT 1
+#define DEFAULT_TC_OFFSET 14
+
+ struct hclge_ets_tc_weight_cmd *ets_weight;
+ struct hclge_desc desc;
+ int i;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
+ ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ struct hclge_pg_info *pg_info;
+
+ ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ pg_info =
+ &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
+ ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
+ }
+
+ ets_weight->weight_offset = DEFAULT_TC_OFFSET;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
@@ -983,6 +1026,19 @@ static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
if (ret)
return ret;
+
+ if (!hnae3_dev_dcb_supported(hdev))
+ return 0;
+
+ ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
+ if (ret == -EOPNOTSUPP) {
+ dev_warn(&hdev->pdev->dev,
+ "fw %08x does't support ets tc weight cmd\n",
+ hdev->fw_version);
+ ret = 0;
+ }
+
+ return ret;
} else {
ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
if (ret)
@@ -992,7 +1048,7 @@ static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
return 0;
}
-int hclge_tm_map_cfg(struct hclge_dev *hdev)
+static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
int ret;
@@ -1107,7 +1163,7 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
return 0;
}
-int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
+static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
int ret;
@@ -1118,7 +1174,7 @@ int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
return hclge_tm_lvl34_schd_mode_cfg(hdev);
}
-static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
+int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
int ret;
@@ -1159,7 +1215,7 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
HCLGE_RX_MAC_PAUSE_EN_MSK;
return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
- hdev->tm_info.hw_pfc_map);
+ hdev->tm_info.pfc_en);
}
/* Each Tc has a 1024 queue sets to backpress, it divides to
@@ -1228,10 +1284,23 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}
-int hclge_pause_setup_hw(struct hclge_dev *hdev)
+static int hclge_tm_bp_setup(struct hclge_dev *hdev)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ ret = hclge_bp_setup_hw(hdev, i);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
int ret;
- u8 i;
ret = hclge_pause_param_setup_hw(hdev);
if (ret)
@@ -1245,18 +1314,17 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
if (!hnae3_dev_dcb_supported(hdev))
return 0;
- /* When MAC is GE Mode, hdev does not support pfc setting */
+ /* GE MAC does not support PFC. When the driver is initializing and
+ * the MAC is in GE mode, ignore the error here; otherwise
+ * initialization will fail.
+ */
ret = hclge_pfc_setup_hw(hdev);
- if (ret)
- dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);
-
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
- ret = hclge_bp_setup_hw(hdev, i);
- if (ret)
- return ret;
- }
+ if (init && ret == -EOPNOTSUPP)
+ dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
+ else
+ return ret;
- return 0;
+ return hclge_tm_bp_setup(hdev);
}
void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
@@ -1294,7 +1362,7 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
hclge_tm_schd_info_init(hdev);
}
-int hclge_tm_init_hw(struct hclge_dev *hdev)
+int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
int ret;
@@ -1306,7 +1374,7 @@ int hclge_tm_init_hw(struct hclge_dev *hdev)
if (ret)
return ret;
- ret = hclge_pause_setup_hw(hdev);
+ ret = hclge_pause_setup_hw(hdev, init);
if (ret)
return ret;
@@ -1325,5 +1393,22 @@ int hclge_tm_schd_init(struct hclge_dev *hdev)
if (ret)
return ret;
- return hclge_tm_init_hw(hdev);
+ return hclge_tm_init_hw(hdev, true);
+}
+
+int hclge_tm_vport_map_update(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+
+ hclge_tm_vport_tc_info_update(vport);
+
+ ret = hclge_vport_q_to_qs_map(hdev, vport);
+ if (ret)
+ return ret;
+
+ if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
+ return 0;
+
+ return hclge_tm_bp_setup(hdev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index b6496a439304..f60e540c7a62 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -142,13 +142,13 @@ struct hclge_port_shapping_cmd {
(HCLGE_TM_SHAP_##string##_LSH))
int hclge_tm_schd_init(struct hclge_dev *hdev);
-int hclge_pause_setup_hw(struct hclge_dev *hdev);
-int hclge_tm_schd_mode_hw(struct hclge_dev *hdev);
+int hclge_tm_vport_map_update(struct hclge_dev *hdev);
+int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init);
+int hclge_tm_schd_setup_hw(struct hclge_dev *hdev);
void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
-int hclge_tm_map_cfg(struct hclge_dev *hdev);
-int hclge_tm_init_hw(struct hclge_dev *hdev);
+int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 82103d5fa815..fc99a0c70164 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -349,16 +349,21 @@ static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
+ struct hnae3_handle *rhandle = &hdev->roce;
struct hnae3_handle *handle = &hdev->nic;
+ struct hnae3_client *rclient;
struct hnae3_client *client;
client = handle->client;
+ rclient = hdev->roce_client;
link_state =
test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
if (link_state != hdev->hw.mac.link) {
client->ops->link_status_change(handle, !!link_state);
+ if (rclient && rclient->ops->link_status_change)
+ rclient->ops->link_status_change(rhandle, !!link_state);
hdev->hw.mac.link = link_state;
}
}
@@ -964,33 +969,29 @@ static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
}
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
- bool en_uc_pmc, bool en_mc_pmc)
+ bool en_bc_pmc)
{
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclgevf_desc desc;
- int status;
+ int ret;
req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
- req->msg[1] = en_uc_pmc ? 1 : 0;
- req->msg[2] = en_mc_pmc ? 1 : 0;
+ req->msg[1] = en_bc_pmc ? 1 : 0;
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status)
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
dev_err(&hdev->pdev->dev,
- "Set promisc mode fail, status is %d.\n", status);
+ "Set promisc mode fail, status is %d.\n", ret);
- return status;
+ return ret;
}
-static int hclgevf_set_promisc_mode(struct hnae3_handle *handle,
- bool en_uc_pmc, bool en_mc_pmc)
+static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-
- return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
+ return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc);
}
static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
@@ -1264,7 +1265,7 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
if (ret)
return ret;
- return 0;
+ return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}
static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
@@ -2377,6 +2378,15 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret)
goto err_config;
+ /* vf is not allowed to enable unicast/multicast promisc mode.
+ * For revision 0x20, default to disable broadcast promisc mode;
+ * the firmware makes sure broadcast packets can still be accepted.
+ * For revision 0x21, default to enable broadcast promisc mode.
+ */
+ ret = hclgevf_set_promisc_mode(hdev, true);
+ if (ret)
+ goto err_config;
+
/* Initialize RSS for this VF */
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
@@ -2461,7 +2471,8 @@ static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
struct hnae3_handle *nic = &hdev->nic;
struct hnae3_knic_private_info *kinfo = &nic->kinfo;
- return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+ return min_t(u32, hdev->rss_size_max,
+ hdev->num_tqps / kinfo->num_tc);
}
/**
@@ -2482,7 +2493,7 @@ static void hclgevf_get_channels(struct hnae3_handle *handle,
ch->max_combined = hclgevf_get_max_channels(hdev);
ch->other_count = 0;
ch->max_other = 0;
- ch->combined_count = hdev->num_tqps;
+ ch->combined_count = handle->kinfo.rss_size;
}
static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
@@ -2640,7 +2651,6 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_vector = hclgevf_get_vector,
.put_vector = hclgevf_put_vector,
.reset_queue = hclgevf_reset_tqp,
- .set_promisc_mode = hclgevf_set_promisc_mode,
.get_mac_addr = hclgevf_get_mac_addr,
.set_mac_addr = hclgevf_set_mac_addr,
.add_uc_addr = hclgevf_add_uc_addr,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 6b19607a4caa..3875f39f43bb 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -1008,3 +1008,16 @@ int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
&hw_ci, sizeof(hw_ci), NULL,
NULL, HINIC_MGMT_MSG_SYNC);
}
+
+/**
+ * hinic_hwdev_set_msix_state - set msix state
+ * @hwdev: the NIC HW device
+ * @msix_index: IRQ corresponding index number
+ * @flag: msix state
+ *
+ **/
+void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index,
+ enum hinic_msix_state flag)
+{
+ hinic_set_msix_state(hwdev->hwif, msix_index, flag);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index d1a7d2522d82..c9e621e19dd0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -240,4 +240,7 @@ int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index,
int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
u8 pending_limit, u8 coalesc_timer);
+void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index,
+ enum hinic_msix_state flag);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
index 823a17061a97..9b160f076904 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
@@ -168,6 +168,22 @@ void hinic_db_state_set(struct hinic_hwif *hwif,
hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4);
}
+void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx,
+ enum hinic_msix_state flag)
+{
+ u32 offset = msix_idx * HINIC_PCI_MSIX_ENTRY_SIZE +
+ HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL;
+ u32 mask_bits;
+
+ mask_bits = readl(hwif->intr_regs_base + offset);
+ mask_bits &= ~HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT;
+
+ if (flag)
+ mask_bits |= HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT;
+
+ writel(mask_bits, hwif->intr_regs_base + offset);
+}
+
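
The new helper masks interrupts at the PCI MSI-X table instead of through enable_irq()/disable_irq(). Per the PCI specification, each MSI-X table entry is 16 bytes, the Vector Control dword sits at offset 12 within the entry, and bit 0 of Vector Control is the per-vector mask; the HINIC_PCI_MSIX_* constants added below in hinic_hw_if.h encode exactly that. A standalone sketch of the same offset arithmetic and read-modify-write:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MSIX_ENTRY_SIZE   16  /* bytes per MSI-X table entry (PCI spec) */
    #define MSIX_VECTOR_CTRL  12  /* Vector Control dword offset in the entry */
    #define MSIX_CTRL_MASKBIT 1   /* bit 0 of Vector Control: per-vector mask */

    /* Same arithmetic as hinic_set_msix_state(), against a fake table that
     * stands in for the ioremapped MSI-X BAR region. */
    static void set_msix_mask(volatile uint32_t *table, uint16_t idx, int masked)
    {
        size_t off = (idx * MSIX_ENTRY_SIZE + MSIX_VECTOR_CTRL) / sizeof(uint32_t);
        uint32_t ctrl = table[off];

        ctrl &= ~(uint32_t)MSIX_CTRL_MASKBIT;
        if (masked)
            ctrl |= MSIX_CTRL_MASKBIT;
        table[off] = ctrl;
    }

    int main(void)
    {
        uint32_t fake_table[16] = { 0 };  /* four fake 16-byte entries */

        set_msix_mask(fake_table, 2, 1);
        printf("entry 2 vector control = %#x\n",
               (unsigned)fake_table[(2 * 16 + 12) / 4]);
        return 0;
    }
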
/**
* hwif_ready - test if the HW is ready for use
* @hwif: the HW interface of a pci function device
@@ -321,6 +337,13 @@ int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev)
return -ENOMEM;
}
+ hwif->intr_regs_base = pci_ioremap_bar(pdev, HINIC_PCI_INTR_REGS_BAR);
+ if (!hwif->intr_regs_base) {
+ dev_err(&pdev->dev, "Failed to map interrupt regs\n");
+ err = -ENOMEM;
+ goto err_map_intr_bar;
+ }
+
err = hwif_ready(hwif);
if (err) {
dev_err(&pdev->dev, "HW interface is not ready\n");
@@ -337,7 +360,11 @@ int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev)
return 0;
err_hwif_ready:
+ iounmap(hwif->intr_regs_base);
+
+err_map_intr_bar:
iounmap(hwif->cfg_regs_bar);
+
return err;
}
@@ -347,5 +374,6 @@ err_hwif_ready:
**/
void hinic_free_hwif(struct hinic_hwif *hwif)
{
+ iounmap(hwif->intr_regs_base);
iounmap(hwif->cfg_regs_bar);
}
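
Mapping a second BAR extends the usual goto-style unwind in hinic_init_hwif: resources are released in reverse order of acquisition, and each new acquisition gets a label placed just above the unwind steps for everything acquired before it. A compact standalone sketch of the idiom (the acquire/release helpers are stand-ins):

    #include <stdio.h>

    static int acquire(const char *what) { printf("map %s\n", what); return 0; }
    static void release(const char *what) { printf("unmap %s\n", what); }
    static int check_ready(void) { return -1; /* force the error path */ }

    static int init(void)
    {
        int err;

        if ((err = acquire("cfg_regs")))
            return err;
        if ((err = acquire("intr_regs")))
            goto err_map_intr;   /* only cfg_regs to undo */
        if ((err = check_ready()))
            goto err_ready;      /* both regions to undo */

        return 0;

    err_ready:
        release("intr_regs");
    err_map_intr:
        release("cfg_regs");
        return err;
    }

    int main(void) { return init() ? 1 : 0; }
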
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
index 5b4760c0e9f5..22ec7f73e0a6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
@@ -152,6 +152,7 @@
#define HINIC_IS_PPF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PPF)
#define HINIC_PCI_CFG_REGS_BAR 0
+#define HINIC_PCI_INTR_REGS_BAR 2
#define HINIC_PCI_DB_BAR 4
#define HINIC_PCIE_ST_DISABLE 0
@@ -164,6 +165,10 @@
#define HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT 0 /* Disabled */
#define HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT 7 /* max */
+#define HINIC_PCI_MSIX_ENTRY_SIZE 16
+#define HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL 12
+#define HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT 1
+
enum hinic_pcie_nosnoop {
HINIC_PCIE_SNOOP = 0,
HINIC_PCIE_NO_SNOOP = 1,
@@ -207,6 +212,11 @@ enum hinic_db_state {
HINIC_DB_DISABLE = 1,
};
+enum hinic_msix_state {
+ HINIC_MSIX_ENABLE,
+ HINIC_MSIX_DISABLE,
+};
+
struct hinic_func_attr {
u16 func_idx;
u8 pf_idx;
@@ -226,6 +236,7 @@ struct hinic_func_attr {
struct hinic_hwif {
struct pci_dev *pdev;
void __iomem *cfg_regs_bar;
+ void __iomem *intr_regs_base;
struct hinic_func_attr attr;
};
@@ -251,6 +262,9 @@ int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
u8 *lli_timer, u8 *lli_credit_limit,
u8 *resend_timer);
+void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx,
+ enum hinic_msix_state flag);
+
int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index);
void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index da323b9e1f62..e64bc664f687 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -51,9 +51,10 @@ static unsigned int rx_weight = 64;
module_param(rx_weight, uint, 0644);
MODULE_PARM_DESC(rx_weight, "Number of Rx packets for NAPI budget (default=64)");
-#define HINIC_DEV_ID_QUAD_PORT_25GE 0x1822
-#define HINIC_DEV_ID_DUAL_PORT_25GE 0x0200
-#define HINIC_DEV_ID_DUAL_PORT_100GE 0x0201
+#define HINIC_DEV_ID_QUAD_PORT_25GE 0x1822
+#define HINIC_DEV_ID_DUAL_PORT_100GE 0x0200
+#define HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ 0x0205
+#define HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ 0x0210
#define HINIC_WQ_NAME "hinic_dev"
@@ -1113,8 +1114,9 @@ static void hinic_shutdown(struct pci_dev *pdev)
static const struct pci_device_id hinic_pci_table[] = {
{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE), 0},
- { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_25GE), 0},
{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0},
+ { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ), 0},
+ { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ), 0},
{ 0, 0}
};
MODULE_DEVICE_TABLE(pci, hinic_pci_table);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 0098b206e7e9..b6d218768ec1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -381,6 +381,7 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
static int rx_poll(struct napi_struct *napi, int budget)
{
struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
+ struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
struct hinic_rq *rq = rxq->rq;
int pkts;
@@ -389,7 +390,10 @@ static int rx_poll(struct napi_struct *napi, int budget)
return budget;
napi_complete(napi);
- enable_irq(rq->irq);
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ rq->msix_entry,
+ HINIC_MSIX_ENABLE);
+
return pkts;
}
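
With IRQ-line toggling replaced by MSI-X masking, the poll function still follows the standard NAPI contract: consume up to budget packets, return budget while work remains (leaving the vector masked), and re-arm the interrupt only after completing NAPI. A hedged skeleton of that control flow with stubbed helpers:

    #include <stdio.h>

    /* Stand-ins for the driver/NAPI primitives. */
    static int  process_rx(int budget) { return budget / 2; }  /* pretend half the budget */
    static void napi_done(void) { puts("napi_complete"); }
    static void unmask_vector(unsigned int msix) { printf("unmask vector %u\n", msix); }

    /* The NAPI contract: return budget while work remains (vector stays
     * masked); otherwise complete NAPI first, then re-arm the interrupt. */
    static int poll(unsigned int msix_entry, int budget)
    {
        int pkts = process_rx(budget);

        if (pkts == budget)
            return budget;

        napi_done();
        unmask_vector(msix_entry);
        return pkts;
    }

    int main(void)
    {
        printf("polled %d packets\n", poll(3, 64));
        return 0;
    }
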
@@ -414,7 +418,10 @@ static irqreturn_t rx_irq(int irq, void *data)
struct hinic_dev *nic_dev;
/* Disable the interrupt until NAPI completes */
- disable_irq_nosync(rq->irq);
+ nic_dev = netdev_priv(rxq->netdev);
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ rq->msix_entry,
+ HINIC_MSIX_DISABLE);
nic_dev = netdev_priv(rxq->netdev);
hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 11e73e67358d..e17bf33eba0c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -655,7 +655,9 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
if (pkts < budget) {
napi_complete(napi);
- enable_irq(sq->irq);
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ sq->msix_entry,
+ HINIC_MSIX_ENABLE);
return pkts;
}
@@ -682,7 +684,9 @@ static irqreturn_t tx_irq(int irq, void *data)
nic_dev = netdev_priv(txq->netdev);
/* Disable the interrupt until NAPI completes */
- disable_irq_nosync(txq->sq->irq);
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ txq->sq->msix_entry,
+ HINIC_MSIX_DISABLE);
hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 8de9085bba9e..d684998ba2b0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -34,6 +34,7 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/xdp_sock.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
#include "i40e_client.h"
@@ -523,6 +524,8 @@ struct i40e_pf {
#define I40E_FLAG_FD_SB_INACTIVE BIT(22)
#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(23)
#define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
+#define I40E_FLAG_RS_FEC BIT(25)
+#define I40E_FLAG_BASE_R_FEC BIT(26)
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
@@ -787,11 +790,6 @@ struct i40e_vsi {
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
-
- /* AF_XDP zero-copy */
- struct xdp_umem **xsk_umems;
- u16 num_xsk_umems_used;
- u16 num_xsk_umems;
} ____cacheline_internodealigned_in_smp;
struct i40e_netdev_priv {
@@ -1091,6 +1089,8 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
+void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
+
static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
{
return !!vsi->xdp_prog;
@@ -1104,10 +1104,10 @@ static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
if (ring_is_xdp(ring))
qid -= ring->vsi->alloc_queue_pairs;
- if (!ring->vsi->xsk_umems || !ring->vsi->xsk_umems[qid] || !xdp_on)
+ if (!xdp_on)
return NULL;
- return ring->vsi->xsk_umems[qid];
+ return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
}
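
The per-VSI xsk_umems array is gone; the ring helper now defers to the core XDP lookup, which in kernels of this vintage is struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id) and returns NULL when no UMEM is attached to the queue. A toy sketch of the same lookup-or-NULL contract (the registry below is illustrative):

    #include <stddef.h>
    #include <stdio.h>

    struct umem { int id; };

    #define NQUEUES 4
    static struct umem *attached[NQUEUES];  /* toy per-netdev registry */

    /* Mirror of the lookup-or-NULL contract: out-of-range or unattached
     * queues simply yield NULL, so callers need no separate bookkeeping. */
    static struct umem *get_umem_from_qid(unsigned int qid)
    {
        if (qid >= NQUEUES)
            return NULL;
        return attached[qid];
    }

    int main(void)
    {
        static struct umem u = { .id = 42 };

        attached[1] = &u;
        printf("qid 1: %s\n", get_umem_from_qid(1) ? "umem" : "none");
        printf("qid 2: %s\n", get_umem_from_qid(2) ? "umem" : "none");
        return 0;
    }
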
int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index a20d1cf058ad..c67d485d6f99 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1642,30 +1642,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
}
- if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
- cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
- if (cnt != 1) {
- dev_info(&pf->pdev->dev, "tx_timeout <vsi_seid>\n");
- goto netdev_ops_write_done;
- }
- vsi = i40e_dbg_find_vsi(pf, vsi_seid);
- if (!vsi) {
- dev_info(&pf->pdev->dev,
- "tx_timeout: VSI %d not found\n", vsi_seid);
- } else if (!vsi->netdev) {
- dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n",
- vsi_seid);
- } else if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
- dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n",
- vsi_seid);
- } else if (rtnl_trylock()) {
- vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev);
- rtnl_unlock();
- dev_info(&pf->pdev->dev, "tx_timeout called\n");
- } else {
- dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
- }
- } else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
+ if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
int mtu;
cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
@@ -1733,7 +1710,6 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
dev_info(&pf->pdev->dev, "unknown command '%s'\n",
i40e_dbg_netdev_ops_buf);
dev_info(&pf->pdev->dev, "available commands\n");
- dev_info(&pf->pdev->dev, " tx_timeout <vsi_seid>\n");
dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index a6bc7847346b..4c885801fa26 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -438,6 +438,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("disable-source-pruning",
I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
+ I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0),
+ I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -606,6 +608,24 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full);
}
+ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_AOC ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_ACC) {
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ }
+ }
/* need to add new 10G PHY types */
if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) {
@@ -721,6 +741,13 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
25000baseSR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseSR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -825,6 +852,9 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
40000baseKR4_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
ethtool_link_ksettings_add_link_mode(ks, supported,
20000baseKR2_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
@@ -838,6 +868,10 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
40000baseKR4_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
ethtool_link_ksettings_add_link_mode(ks, advertising,
20000baseKR2_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -855,6 +889,13 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
25000baseCR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
break;
case I40E_PHY_TYPE_25GBASE_AOC:
case I40E_PHY_TYPE_25GBASE_ACC:
@@ -862,9 +903,15 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseCR_Full);
-
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseCR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -1261,6 +1308,154 @@ done:
return err;
}
+static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status status = 0;
+ u32 flags = 0;
+ int err = 0;
+
+ flags = READ_ONCE(pf->flags);
+ i40e_set_fec_in_flags(fec_cfg, &flags);
+
+ /* Get the current phy config */
+ memset(&abilities, 0, sizeof(abilities));
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ if (abilities.fec_cfg_curr_mod_ext_info != fec_cfg) {
+ struct i40e_aq_set_phy_config config;
+
+ memset(&config, 0, sizeof(config));
+ config.phy_type = abilities.phy_type;
+ config.abilities = abilities.abilities;
+ config.phy_type_ext = abilities.phy_type_ext;
+ config.link_speed = abilities.link_speed;
+ config.eee_capability = abilities.eee_capability;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = fec_cfg & I40E_AQ_PHY_FEC_CONFIG_MASK;
+ status = i40e_aq_set_phy_config(hw, &config, NULL);
+ if (status) {
+ netdev_info(netdev,
+ "Set phy config failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ err = -EAGAIN;
+ goto done;
+ }
+ pf->flags = flags;
+ status = i40e_update_link_info(hw);
+ if (status)
+ /* log at debug level only: failures here relate to the link
+ * itself (e.g. no physical connection) rather than to the
+ * FEC settings
+ */
+ netdev_dbg(netdev,
+ "Updating link info failed with err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+
+done:
+ return err;
+}
+
+static int i40e_get_fec_param(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status status = 0;
+ int err = 0;
+
+ /* Get the current phy config */
+ memset(&abilities, 0, sizeof(abilities));
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ fecparam->fec = 0;
+ if (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_AUTO)
+ fecparam->fec |= ETHTOOL_FEC_AUTO;
+ if ((abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_SET_FEC_REQUEST_RS) ||
+ (abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_SET_FEC_ABILITY_RS))
+ fecparam->fec |= ETHTOOL_FEC_RS;
+ if ((abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_SET_FEC_REQUEST_KR) ||
+ (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_ABILITY_KR))
+ fecparam->fec |= ETHTOOL_FEC_BASER;
+ if (abilities.fec_cfg_curr_mod_ext_info == 0)
+ fecparam->fec |= ETHTOOL_FEC_OFF;
+
+ if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
+ fecparam->active_fec = ETHTOOL_FEC_BASER;
+ else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
+ fecparam->active_fec = ETHTOOL_FEC_RS;
+ else
+ fecparam->active_fec = ETHTOOL_FEC_OFF;
+done:
+ return err;
+}
+
+static int i40e_set_fec_param(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u8 fec_cfg = 0;
+ int err = 0;
+
+ if (hw->device_id != I40E_DEV_ID_25G_SFP28 &&
+ hw->device_id != I40E_DEV_ID_25G_B) {
+ err = -EPERM;
+ goto done;
+ }
+
+ switch (fecparam->fec) {
+ case ETHTOOL_FEC_AUTO:
+ fec_cfg = I40E_AQ_SET_FEC_AUTO;
+ break;
+ case ETHTOOL_FEC_RS:
+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS |
+ I40E_AQ_SET_FEC_ABILITY_RS);
+ break;
+ case ETHTOOL_FEC_BASER:
+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR |
+ I40E_AQ_SET_FEC_ABILITY_KR);
+ break;
+ case ETHTOOL_FEC_OFF:
+ case ETHTOOL_FEC_NONE:
+ fec_cfg = 0;
+ break;
+ default:
+ dev_warn(&pf->pdev->dev, "Unsupported FEC mode: %d",
+ fecparam->fec);
+ err = -EINVAL;
+ goto done;
+ }
+
+ err = i40e_set_fec_cfg(netdev, fec_cfg);
+
+done:
+ return err;
+}
+
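
The set_fecparam handler maps each ethtool FEC request onto admin-queue request/ability bit pairs: RS and BaseR each set two bits, AUTO delegates the choice to firmware, and combinations are rejected. From userspace the entry points are the standard ethtool FEC commands (ethtool --show-fec / ethtool --set-fec, assuming an ethtool build with FEC support). A condensed sketch of the mapping (the bit values are illustrative stand-ins):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the ETHTOOL_FEC_* and AQ bits. */
    enum { FEC_OFF = 1, FEC_AUTO = 2, FEC_RS = 4, FEC_BASER = 8 };
    enum {
        AQ_FEC_AUTO    = 0x01,
        AQ_FEC_REQ_RS  = 0x02, AQ_FEC_ABIL_RS = 0x04,
        AQ_FEC_REQ_KR  = 0x08, AQ_FEC_ABIL_KR = 0x10,
    };

    static int fec_to_aq(uint32_t fec, uint8_t *cfg)
    {
        switch (fec) {
        case FEC_AUTO:  *cfg = AQ_FEC_AUTO; return 0;
        case FEC_RS:    *cfg = AQ_FEC_REQ_RS | AQ_FEC_ABIL_RS; return 0;
        case FEC_BASER: *cfg = AQ_FEC_REQ_KR | AQ_FEC_ABIL_KR; return 0;
        case FEC_OFF:   *cfg = 0; return 0;
        default:        return -1;  /* combinations are rejected */
        }
    }

    int main(void)
    {
        uint8_t cfg;

        if (!fec_to_aq(FEC_RS, &cfg))
            printf("RS -> aq cfg %#x\n", cfg);
        return 0;
    }
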
static int i40e_nway_reset(struct net_device *netdev)
{
/* restart autonegotiation */
@@ -1376,7 +1571,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
else if (!pause->rx_pause && !pause->tx_pause)
hw->fc.requested_mode = I40E_FC_NONE;
else
- return -EINVAL;
+ return -EINVAL;
/* Tell the OS link is going down, the link will go back up when fw
* says it is ready asynchronously
@@ -2175,7 +2370,7 @@ static int i40e_get_ts_info(struct net_device *dev,
return 0;
}
-static int i40e_link_test(struct net_device *netdev, u64 *data)
+static u64 i40e_link_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
@@ -2198,7 +2393,7 @@ static int i40e_link_test(struct net_device *netdev, u64 *data)
return *data;
}
-static int i40e_reg_test(struct net_device *netdev, u64 *data)
+static u64 i40e_reg_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
@@ -2209,7 +2404,7 @@ static int i40e_reg_test(struct net_device *netdev, u64 *data)
return *data;
}
-static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
+static u64 i40e_eeprom_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
@@ -2223,7 +2418,7 @@ static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
return *data;
}
-static int i40e_intr_test(struct net_device *netdev, u64 *data)
+static u64 i40e_intr_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
@@ -2440,10 +2635,10 @@ static int i40e_set_phys_id(struct net_device *netdev,
default:
break;
}
- if (ret)
- return -ENOENT;
- else
- return 0;
+ if (ret)
+ return -ENOENT;
+ else
+ return 0;
}
/* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt
@@ -4676,6 +4871,15 @@ flags_complete:
}
}
+ if (((changed_flags & I40E_FLAG_RS_FEC) ||
+ (changed_flags & I40E_FLAG_BASE_R_FEC)) &&
+ pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
+ pf->hw.device_id != I40E_DEV_ID_25G_B) {
+ dev_warn(&pf->pdev->dev,
+ "Device does not support changing FEC configuration\n");
+ return -EOPNOTSUPP;
+ }
+
/* Now that we've checked to ensure that the new flags are valid, load
* them into place. Since we only modify flags either (a) during
* initialization or (b) while holding the RTNL lock, we don't need
@@ -4714,6 +4918,24 @@ flags_complete:
}
}
+ if ((changed_flags & I40E_FLAG_RS_FEC) ||
+ (changed_flags & I40E_FLAG_BASE_R_FEC)) {
+ u8 fec_cfg = 0;
+
+ if (pf->flags & I40E_FLAG_RS_FEC &&
+ pf->flags & I40E_FLAG_BASE_R_FEC) {
+ fec_cfg = I40E_AQ_SET_FEC_AUTO;
+ } else if (pf->flags & I40E_FLAG_RS_FEC) {
+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS |
+ I40E_AQ_SET_FEC_ABILITY_RS);
+ } else if (pf->flags & I40E_FLAG_BASE_R_FEC) {
+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR |
+ I40E_AQ_SET_FEC_ABILITY_KR);
+ }
+ if (i40e_set_fec_cfg(dev, fec_cfg))
+ dev_warn(&pf->pdev->dev, "Cannot change FEC config\n");
+ }
+
if ((changed_flags & pf->flags &
I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
(pf->flags & I40E_FLAG_MFP_ENABLED))
@@ -4948,6 +5170,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.set_per_queue_coalesce = i40e_set_per_queue_coalesce,
.get_link_ksettings = i40e_get_link_ksettings,
.set_link_ksettings = i40e_set_link_ksettings,
+ .get_fecparam = i40e_get_fec_param,
+ .set_fecparam = i40e_set_fec_param,
};
void i40e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f52e2c46e6a7..5c6731b97059 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -26,8 +26,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 7
-#define DRV_VERSION_BUILD 6
+#define DRV_VERSION_MINOR 8
+#define DRV_VERSION_BUILD 10
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -3610,7 +3610,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
(I40E_QUEUE_TYPE_TX
<< I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
- wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+ wr32(hw, I40E_QINT_TQCTL(nextqp), val);
}
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
@@ -8131,8 +8131,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
i40e_service_event_schedule(pf);
} else {
i40e_pf_unquiesce_all_vsi(pf);
- set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
- set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+ set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
}
exit:
@@ -11042,6 +11042,7 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
return 0;
+ queue_count = min_t(int, queue_count, num_online_cpus());
new_rss_size = min_t(int, queue_count, pf->rss_size_max);
if (queue_count != vsi->num_queue_pairs) {
@@ -11644,7 +11645,8 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags)
+ u16 flags,
+ struct netlink_ext_ack *extack)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_pf *pf = np->vsi->back;
@@ -13859,6 +13861,29 @@ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
}
/**
+ * i40e_set_fec_in_flags - helper function for setting FEC options in flags
+ * @fec_cfg: FEC option to set in flags
+ * @flags: ptr to flags in which we set FEC option
+ **/
+void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
+{
+ if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
+ *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
+ if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
+ (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
+ *flags |= I40E_FLAG_RS_FEC;
+ *flags &= ~I40E_FLAG_BASE_R_FEC;
+ }
+ if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
+ (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
+ *flags |= I40E_FLAG_BASE_R_FEC;
+ *flags &= ~I40E_FLAG_RS_FEC;
+ }
+ if (fec_cfg == 0)
+ *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
+}
+
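
i40e_set_fec_in_flags keeps the two private flags mutually exclusive except under AUTO, where both are set. Worked out from the branches above (assuming only one of the listed bit groups is set in fec_cfg at a time):

    fec_cfg bits set                 rs-fec  base-r-fec
    AUTO                             1       1
    REQUEST_RS and/or ABILITY_RS     1       0
    REQUEST_KR and/or ABILITY_KR     0       1
    none (fec_cfg == 0)              0       0
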
+/**
* i40e_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in i40e_pci_tbl
@@ -14349,6 +14374,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
+ /* set the FEC config due to the board capabilities */
+ i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
+
/* get the supported phy types from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
if (err)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 2ac23ebfbf31..831d52bc3c9a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2069,6 +2069,11 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
+ if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
for (i = 0; i < qci->num_queue_pairs; i++) {
qpi = &qci->qpair[i];
@@ -3384,8 +3389,8 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
dev_info(&pf->pdev->dev,
"VF %d: Invalid input/s, can't apply cloud filter\n",
vf->vf_id);
- aq_ret = I40E_ERR_PARAM;
- goto err;
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
}
cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
@@ -3656,7 +3661,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
int ret;
pf->vf_aq_requests++;
- if (local_vf_id >= pf->num_alloc_vfs)
+ if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
return -EINVAL;
vf = &(pf->vf[local_vf_id]);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 870cf654e436..96d849460d9b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -10,69 +10,6 @@
#include "i40e_xsk.h"
/**
- * i40e_alloc_xsk_umems - Allocate an array to store per ring UMEMs
- * @vsi: Current VSI
- *
- * Returns 0 on success, <0 on failure
- **/
-static int i40e_alloc_xsk_umems(struct i40e_vsi *vsi)
-{
- if (vsi->xsk_umems)
- return 0;
-
- vsi->num_xsk_umems_used = 0;
- vsi->num_xsk_umems = vsi->alloc_queue_pairs;
- vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
- GFP_KERNEL);
- if (!vsi->xsk_umems) {
- vsi->num_xsk_umems = 0;
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/**
- * i40e_add_xsk_umem - Store a UMEM for a certain ring/qid
- * @vsi: Current VSI
- * @umem: UMEM to store
- * @qid: Ring/qid to associate with the UMEM
- *
- * Returns 0 on success, <0 on failure
- **/
-static int i40e_add_xsk_umem(struct i40e_vsi *vsi, struct xdp_umem *umem,
- u16 qid)
-{
- int err;
-
- err = i40e_alloc_xsk_umems(vsi);
- if (err)
- return err;
-
- vsi->xsk_umems[qid] = umem;
- vsi->num_xsk_umems_used++;
-
- return 0;
-}
-
-/**
- * i40e_remove_xsk_umem - Remove a UMEM for a certain ring/qid
- * @vsi: Current VSI
- * @qid: Ring/qid associated with the UMEM
- **/
-static void i40e_remove_xsk_umem(struct i40e_vsi *vsi, u16 qid)
-{
- vsi->xsk_umems[qid] = NULL;
- vsi->num_xsk_umems_used--;
-
- if (vsi->num_xsk_umems == 0) {
- kfree(vsi->xsk_umems);
- vsi->xsk_umems = NULL;
- vsi->num_xsk_umems = 0;
- }
-}
-
-/**
* i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev
* @vsi: Current VSI
* @umem: UMEM to DMA map
@@ -140,6 +77,7 @@ static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
u16 qid)
{
+ struct net_device *netdev = vsi->netdev;
struct xdp_umem_fq_reuse *reuseq;
bool if_running;
int err;
@@ -150,12 +88,9 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
if (qid >= vsi->num_queue_pairs)
return -EINVAL;
- if (vsi->xsk_umems) {
- if (qid >= vsi->num_xsk_umems)
- return -EINVAL;
- if (vsi->xsk_umems[qid])
- return -EBUSY;
- }
+ if (qid >= netdev->real_num_rx_queues ||
+ qid >= netdev->real_num_tx_queues)
+ return -EINVAL;
reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
if (!reuseq)
@@ -173,13 +108,7 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
err = i40e_queue_pair_disable(vsi, qid);
if (err)
return err;
- }
-
- err = i40e_add_xsk_umem(vsi, umem, qid);
- if (err)
- return err;
- if (if_running) {
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
@@ -197,11 +126,13 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
**/
static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
{
+ struct net_device *netdev = vsi->netdev;
+ struct xdp_umem *umem;
bool if_running;
int err;
- if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
- !vsi->xsk_umems[qid])
+ umem = xdp_get_umem_from_qid(netdev, qid);
+ if (!umem)
return -EINVAL;
if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
@@ -212,8 +143,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
return err;
}
- i40e_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]);
- i40e_remove_xsk_umem(vsi, qid);
+ i40e_xsk_umem_dma_unmap(vsi, umem);
if (if_running) {
err = i40e_queue_pair_enable(vsi, qid);
@@ -237,20 +167,18 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem,
u16 qid)
{
+ struct net_device *netdev = vsi->netdev;
+ struct xdp_umem *queried_umem;
+
if (vsi->type != I40E_VSI_MAIN)
return -EINVAL;
- if (qid >= vsi->num_queue_pairs)
- return -EINVAL;
+ queried_umem = xdp_get_umem_from_qid(netdev, qid);
- if (vsi->xsk_umems) {
- if (qid >= vsi->num_xsk_umems)
- return -EINVAL;
- *umem = vsi->xsk_umems[qid];
- return 0;
- }
+ if (!queried_umem)
+ return -EINVAL;
- *umem = NULL;
+ *umem = queried_umem;
return 0;
}
@@ -945,13 +873,11 @@ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
**/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
+ struct net_device *netdev = vsi->netdev;
int i;
- if (!vsi->xsk_umems)
- return false;
-
for (i = 0; i < vsi->num_queue_pairs; i++) {
- if (vsi->xsk_umems[i])
+ if (xdp_get_umem_from_qid(netdev, i))
return true;
}
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index a385575600f6..55944e089558 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -26,6 +26,7 @@
#include <linux/bitmap.h>
#include <linux/log2.h>
#include <linux/ip.h>
+#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
#include <linux/avf/virtchnl.h>
@@ -110,6 +111,9 @@ extern const char ice_drv_ver[];
#define ice_for_each_alloc_rxq(vsi, i) \
for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
+#define ice_for_each_q_vector(vsi, i) \
+ for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)
+
struct ice_tc_info {
u16 qoffset;
u16 qcount_tx;
@@ -129,6 +133,17 @@ struct ice_res_tracker {
u16 list[1];
};
+struct ice_qs_cfg {
+ struct mutex *qs_mutex; /* will be assigned to &pf->avail_q_mutex */
+ unsigned long *pf_map;
+ unsigned long pf_map_size;
+ unsigned int q_count;
+ unsigned int scatter_count;
+ u16 *vsi_map;
+ u16 vsi_map_offset;
+ u8 mapping_mode;
+};
+
struct ice_sw {
struct ice_pf *pf;
u16 sw_id; /* switch ID for this switch */
@@ -270,6 +285,7 @@ enum ice_pf_flags {
ICE_FLAG_RSS_ENA,
ICE_FLAG_SRIOV_ENA,
ICE_FLAG_SRIOV_CAPABLE,
+ ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
ICE_PF_FLAGS_NBITS /* must be last */
};
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index fcdcd80b18e7..242c78469181 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -657,8 +657,13 @@ struct ice_aqc_get_topo {
/* Update TSE (indirect 0x0403)
* Get TSE (indirect 0x0404)
+ * Add TSE (indirect 0x0401)
+ * Delete TSE (indirect 0x040F)
+ * Move TSE (indirect 0x0408)
+ * Suspend Nodes (indirect 0x0409)
+ * Resume Nodes (indirect 0x040A)
*/
-struct ice_aqc_get_cfg_elem {
+struct ice_aqc_sched_elem_cmd {
__le16 num_elem_req; /* Used by commands */
__le16 num_elem_resp; /* Used by responses */
__le32 reserved;
@@ -674,18 +679,6 @@ struct ice_aqc_suspend_resume_elem {
__le32 teid[1];
};
-/* Add TSE (indirect 0x0401)
- * Delete TSE (indirect 0x040F)
- * Move TSE (indirect 0x0408)
- */
-struct ice_aqc_add_move_delete_elem {
- __le16 num_grps_req;
- __le16 num_grps_updated;
- __le32 reserved;
- __le32 addr_high;
- __le32 addr_low;
-};
-
struct ice_aqc_elem_info_bw {
__le16 bw_profile_idx;
__le16 bw_alloc;
@@ -854,11 +847,46 @@ struct ice_aqc_get_phy_caps {
#define ICE_PHY_TYPE_LOW_40GBASE_KR4 BIT_ULL(33)
#define ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC BIT_ULL(34)
#define ICE_PHY_TYPE_LOW_40G_XLAUI BIT_ULL(35)
+#define ICE_PHY_TYPE_LOW_50GBASE_CR2 BIT_ULL(36)
+#define ICE_PHY_TYPE_LOW_50GBASE_SR2 BIT_ULL(37)
+#define ICE_PHY_TYPE_LOW_50GBASE_LR2 BIT_ULL(38)
+#define ICE_PHY_TYPE_LOW_50GBASE_KR2 BIT_ULL(39)
+#define ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC BIT_ULL(40)
+#define ICE_PHY_TYPE_LOW_50G_LAUI2 BIT_ULL(41)
+#define ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC BIT_ULL(42)
+#define ICE_PHY_TYPE_LOW_50G_AUI2 BIT_ULL(43)
+#define ICE_PHY_TYPE_LOW_50GBASE_CP BIT_ULL(44)
+#define ICE_PHY_TYPE_LOW_50GBASE_SR BIT_ULL(45)
+#define ICE_PHY_TYPE_LOW_50GBASE_FR BIT_ULL(46)
+#define ICE_PHY_TYPE_LOW_50GBASE_LR BIT_ULL(47)
+#define ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 BIT_ULL(48)
+#define ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC BIT_ULL(49)
+#define ICE_PHY_TYPE_LOW_50G_AUI1 BIT_ULL(50)
+#define ICE_PHY_TYPE_LOW_100GBASE_CR4 BIT_ULL(51)
+#define ICE_PHY_TYPE_LOW_100GBASE_SR4 BIT_ULL(52)
+#define ICE_PHY_TYPE_LOW_100GBASE_LR4 BIT_ULL(53)
+#define ICE_PHY_TYPE_LOW_100GBASE_KR4 BIT_ULL(54)
+#define ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC BIT_ULL(55)
+#define ICE_PHY_TYPE_LOW_100G_CAUI4 BIT_ULL(56)
+#define ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC BIT_ULL(57)
+#define ICE_PHY_TYPE_LOW_100G_AUI4 BIT_ULL(58)
+#define ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 BIT_ULL(59)
+#define ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 BIT_ULL(60)
+#define ICE_PHY_TYPE_LOW_100GBASE_CP2 BIT_ULL(61)
+#define ICE_PHY_TYPE_LOW_100GBASE_SR2 BIT_ULL(62)
+#define ICE_PHY_TYPE_LOW_100GBASE_DR BIT_ULL(63)
#define ICE_PHY_TYPE_LOW_MAX_INDEX 63
+/* The second set of defines is for phy_type_high. */
+#define ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 BIT_ULL(0)
+#define ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC BIT_ULL(1)
+#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
+#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
+#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
+#define ICE_PHY_TYPE_HIGH_MAX_INDEX 19
struct ice_aqc_get_phy_caps_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
- __le64 reserved;
+ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
u8 caps;
#define ICE_AQC_PHY_EN_TX_LINK_PAUSE BIT(0)
#define ICE_AQC_PHY_EN_RX_LINK_PAUSE BIT(1)
@@ -923,7 +951,7 @@ struct ice_aqc_set_phy_cfg {
/* Set PHY config command data structure */
struct ice_aqc_set_phy_cfg_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
- __le64 rsvd0;
+ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
u8 caps;
#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
@@ -1032,10 +1060,12 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_SPEED_20GB BIT(6)
#define ICE_AQ_LINK_SPEED_25GB BIT(7)
#define ICE_AQ_LINK_SPEED_40GB BIT(8)
+#define ICE_AQ_LINK_SPEED_50GB BIT(9)
+#define ICE_AQ_LINK_SPEED_100GB BIT(10)
#define ICE_AQ_LINK_SPEED_UNKNOWN BIT(15)
__le32 reserved3; /* Aligns next field to 8-byte boundary */
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
- __le64 reserved4;
+ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
};
/* Set event mask command (direct 0x0613) */
@@ -1055,6 +1085,16 @@ struct ice_aqc_set_event_mask {
u8 reserved1[6];
};
+/* Set Port Identification LED (direct, 0x06E9) */
+struct ice_aqc_set_port_id_led {
+ u8 lport_num;
+ u8 lport_num_valid;
+ u8 ident_mode;
+#define ICE_AQC_PORT_IDENT_LED_BLINK BIT(0)
+#define ICE_AQC_PORT_IDENT_LED_ORIG 0
+ u8 rsvd[13];
+};
+
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Update commands (indirect 0x0703)
@@ -1341,12 +1381,12 @@ struct ice_aq_desc {
struct ice_aqc_get_phy_caps get_phy;
struct ice_aqc_set_phy_cfg set_phy;
struct ice_aqc_restart_an restart_an;
+ struct ice_aqc_set_port_id_led set_port_id_led;
struct ice_aqc_get_sw_cfg get_sw_conf;
struct ice_aqc_sw_rules sw_rules;
struct ice_aqc_get_topo get_topo;
- struct ice_aqc_get_cfg_elem get_update_elem;
+ struct ice_aqc_sched_elem_cmd sched_elem_cmd;
struct ice_aqc_query_txsched_res query_sched_res;
- struct ice_aqc_add_move_delete_elem add_move_delete_elem;
struct ice_aqc_nvm nvm;
struct ice_aqc_pf_vf_msg virt;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
@@ -1442,6 +1482,7 @@ enum ice_adminq_opc {
ice_aqc_opc_restart_an = 0x0605,
ice_aqc_opc_get_link_status = 0x0607,
ice_aqc_opc_set_event_mask = 0x0613,
+ ice_aqc_opc_set_port_id_led = 0x06E9,
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 4c1d35da940d..b17ade424423 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -165,8 +165,10 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
cmd->param0 |= cpu_to_le16(report_mode);
status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
- if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
+ if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
+ pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
+ }
return status;
}
@@ -183,6 +185,9 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
return ICE_MEDIA_UNKNOWN;
hw_link_info = &pi->phy.link_info;
+ if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
+ /* If more than one media type is selected, report unknown */
+ return ICE_MEDIA_UNKNOWN;
if (hw_link_info->phy_type_low) {
switch (hw_link_info->phy_type_low) {
@@ -196,6 +201,15 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
case ICE_PHY_TYPE_LOW_40GBASE_SR4:
case ICE_PHY_TYPE_LOW_40GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR:
+ case ICE_PHY_TYPE_LOW_50GBASE_FR:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_100GBASE_DR:
return ICE_MEDIA_FIBER;
case ICE_PHY_TYPE_LOW_100BASE_TX:
case ICE_PHY_TYPE_LOW_1000BASE_T:
@@ -209,6 +223,11 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
case ICE_PHY_TYPE_LOW_25GBASE_CR1:
case ICE_PHY_TYPE_LOW_40GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_50GBASE_CR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_CP:
+ case ICE_PHY_TYPE_LOW_100GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CP2:
return ICE_MEDIA_DA;
case ICE_PHY_TYPE_LOW_1000BASE_KX:
case ICE_PHY_TYPE_LOW_2500BASE_KX:
@@ -219,10 +238,18 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_25GBASE_KR1:
case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
case ICE_PHY_TYPE_LOW_40GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR2:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
+ return ICE_MEDIA_BACKPLANE;
+ }
+ } else {
+ switch (hw_link_info->phy_type_high) {
+ case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
return ICE_MEDIA_BACKPLANE;
}
}
-
return ICE_MEDIA_UNKNOWN;
}
@@ -274,6 +301,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
/* update current link status information */
hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
+ hw_link_info->phy_type_high = le64_to_cpu(link_data.phy_type_high);
*hw_media_type = ice_get_media_type(pi);
hw_link_info->link_info = link_data.link_info;
hw_link_info->an_info = link_data.an_info;
@@ -750,6 +778,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
status = ICE_ERR_CFG;
goto err_unroll_sched;
}
+ INIT_LIST_HEAD(&hw->agg_list);
status = ice_init_fltr_mgmt_struct(hw);
if (status)
@@ -800,6 +829,7 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_cleanup_fltr_mgmt_struct(hw);
ice_sched_cleanup_all(hw);
+ ice_sched_clear_agg(hw);
if (hw->port_info) {
devm_kfree(ice_hw_to_dev(hw), hw->port_info);
@@ -1655,7 +1685,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw)
* This function is used to write MAC address to the NVM (0x0108).
*/
enum ice_status
-ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
+ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd)
{
struct ice_aqc_manage_mac_write *cmd;
@@ -1667,8 +1697,8 @@ ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
cmd->flags = flags;
/* Prep values for flags, sah, sal */
- cmd->sah = htons(*((u16 *)mac_addr));
- cmd->sal = htonl(*((u32 *)(mac_addr + 2)));
+ cmd->sah = htons(*((const u16 *)mac_addr));
+ cmd->sal = htonl(*((const u32 *)(mac_addr + 2)));
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -1705,16 +1735,20 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
/**
* ice_get_link_speed_based_on_phy_type - returns link speed
* @phy_type_low: lower part of phy_type
+ * @phy_type_high: higher part of phy_type
*
- * This helper function will convert a phy_type_low to its corresponding link
+ * This helper function will convert an entry in phy type structure
+ * [phy_type_low, phy_type_high] to its corresponding link speed.
+ * Note: exactly one bit should be set across [phy_type_low,
+ * phy_type_high], as this function converts a single phy type to its
+ * speed.
- * Note: In the structure of phy_type_low, there should be one bit set, as
- * this function will convert one phy type to its speed.
* If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
* If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
*/
-static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
+static u16
+ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
+ u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
switch (phy_type_low) {
@@ -1768,41 +1802,110 @@ static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
case ICE_PHY_TYPE_LOW_40G_XLAUI:
speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
break;
+ case ICE_PHY_TYPE_LOW_50GBASE_CR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR2:
+ case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_LAUI2:
+ case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI2:
+ case ICE_PHY_TYPE_LOW_50GBASE_CP:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR:
+ case ICE_PHY_TYPE_LOW_50GBASE_FR:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
+ case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI1:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
+ break;
+ case ICE_PHY_TYPE_LOW_100GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_CAUI4:
+ case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_AUI4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CP2:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_100GBASE_DR:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
+ break;
default:
speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
break;
}
- return speed_phy_type_low;
+ switch (phy_type_high) {
+ case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2:
+ case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_100G_AUI2:
+ speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
+ break;
+ default:
+ speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
+ speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
+ return ICE_AQ_LINK_SPEED_UNKNOWN;
+ else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
+ speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
+ return ICE_AQ_LINK_SPEED_UNKNOWN;
+ else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
+ speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
+ return speed_phy_type_low;
+ else
+ return speed_phy_type_high;
}
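
The closing cascade returns a known speed only when exactly one of the two phy_type halves resolved; both-unknown and both-known collapse to ICE_AQ_LINK_SPEED_UNKNOWN. That is equivalent to a logical XOR of the two "known" predicates, as this compact restatement shows:

    #include <stdint.h>
    #include <stdio.h>

    #define SPEED_UNKNOWN 0

    /* Equivalent restatement of the cascade: keep the result only when
     * exactly one of the two halves produced a known speed. */
    static uint16_t resolve_speed(uint16_t low, uint16_t high)
    {
        int low_known = (low != SPEED_UNKNOWN);
        int high_known = (high != SPEED_UNKNOWN);

        if (low_known ^ high_known)
            return low_known ? low : high;
        return SPEED_UNKNOWN;
    }

    int main(void)
    {
        printf("%u\n", resolve_speed(100, SPEED_UNKNOWN));  /* 100 */
        printf("%u\n", resolve_speed(100, 50));             /* 0: ambiguous */
        return 0;
    }
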
/**
* ice_update_phy_type
* @phy_type_low: pointer to the lower part of phy_type
+ * @phy_type_high: pointer to the higher part of phy_type
* @link_speeds_bitmap: targeted link speeds bitmap
*
* Note: For the link_speeds_bitmap layout, see
* [ice_aqc_get_link_status->link_speed]. The caller may pass a
* link_speeds_bitmap that includes multiple speeds.
*
- * The value of phy_type_low will present a certain link speed. This helper
- * function will turn on bits in the phy_type_low based on the value of
+ * Each entry in the [phy_type_low, phy_type_high] structure represents
+ * a certain link speed. This helper function turns on bits in that
+ * structure based on the value of the
* link_speeds_bitmap input parameter.
*/
-void ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap)
+void
+ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
+ u16 link_speeds_bitmap)
{
u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
+ u64 pt_high;
u64 pt_low;
int index;
/* We first check with low part of phy_type */
for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
pt_low = BIT_ULL(index);
- speed = ice_get_link_speed_based_on_phy_type(pt_low);
+ speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
if (link_speeds_bitmap & speed)
*phy_type_low |= BIT_ULL(index);
}
+
+ /* We then check with high part of phy_type */
+ for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
+ pt_high = BIT_ULL(index);
+ speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
+
+ if (link_speeds_bitmap & speed)
+ *phy_type_high |= BIT_ULL(index);
+ }
}
/**
@@ -1934,6 +2037,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
if (ena_auto_link_update)
cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
/* Copy over all the old settings */
+ cfg.phy_type_high = pcaps->phy_type_high;
cfg.phy_type_low = pcaps->phy_type_low;
cfg.low_power_ctrl = pcaps->low_power_ctrl;
cfg.eee_cap = pcaps->eee_cap;
@@ -2032,6 +2136,34 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
}
/**
+ * ice_aq_set_port_id_led
+ * @pi: pointer to the port information
+ * @is_orig_mode: is this LED set to original mode (by the net-list)
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set LED value for the given port (0x06e9)
+ */
+enum ice_status
+ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_port_id_led *cmd;
+ struct ice_hw *hw = pi->hw;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_port_id_led;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
+
+ if (is_orig_mode)
+ cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
+ else
+ cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
* __ice_aq_get_set_rss_lut
* @hw: pointer to the hardware structure
* @vsi_id: VSI FW index
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index cf760c24a6aa..d7c7c2ed8823 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -28,6 +28,8 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
enum ice_status ice_init_nvm(struct ice_hw *hw);
+enum ice_status ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words,
+ u16 *data);
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
@@ -70,9 +72,10 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
void
-ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap);
+ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
+ u16 link_speeds_bitmap);
enum ice_status
-ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
+ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
enum ice_status
@@ -86,6 +89,10 @@ enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
+ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
+ struct ice_sq_cd *cd);
+
+enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cmd_details);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 3b6e387f5440..a82f0202652d 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -114,6 +114,22 @@ static const u32 ice_regs_dump_list[] = {
QRX_ITR(0),
};
+struct ice_priv_flag {
+ char name[ETH_GSTRING_LEN];
+ u32 bitno; /* bit position in pf->flags */
+};
+
+#define ICE_PRIV_FLAG(_name, _bitno) { \
+ .name = _name, \
+ .bitno = _bitno, \
+}
+
+static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
+ ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
+};
+
+#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
+
/**
* ice_nvm_version_str - format the NVM version strings
* @hw: ptr to the hardware info
@@ -152,6 +168,7 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
+ drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}
static int ice_get_regs_len(struct net_device __always_unused *netdev)
@@ -203,6 +220,55 @@ static void ice_set_msglevel(struct net_device *netdev, u32 data)
#endif /* !CONFIG_DYNAMIC_DEBUG */
}
+static int ice_get_eeprom_len(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+
+ return (int)(pf->hw.nvm.sr_words * sizeof(u16));
+}
+
+static int
+ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ u16 first_word, last_word, nwords;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ struct device *dev;
+ int ret = 0;
+ u16 *buf;
+
+ dev = &pf->pdev->dev;
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ nwords = last_word - first_word + 1;
+
+ buf = devm_kcalloc(dev, nwords, sizeof(u16), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ status = ice_read_sr_buf(hw, first_word, &nwords, buf);
+ if (status) {
+ dev_err(dev, "ice_read_sr_buf failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ eeprom->len = sizeof(u16) * nwords;
+ ret = -EIO;
+ goto out;
+ }
+
+ memcpy(bytes, (u8 *)buf + (eeprom->offset & 1), eeprom->len);
+out:
+ devm_kfree(dev, buf);
+ return ret;
+}
+
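
ice_get_eeprom works on 16-bit shadow-RAM words, so the byte-addressed ethtool request is converted with shifts: first_word = offset >> 1, last_word = (offset + len - 1) >> 1, and the final memcpy re-applies the odd-byte offset. A worked example with small hypothetical numbers:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical request: 5 bytes starting at byte offset 3. */
        unsigned int offset = 3, len = 5;

        unsigned int first_word = offset >> 1;            /* 1 */
        unsigned int last_word = (offset + len - 1) >> 1; /* 3 */
        unsigned int nwords = last_word - first_word + 1; /* 3 */

        /* Three 16-bit words (6 bytes) are read; the copy then skips
         * (offset & 1) = 1 leading byte to land on byte 3 exactly. */
        printf("words %u..%u (%u), skip %u byte(s)\n",
               first_word, last_word, nwords, offset & 1);
        return 0;
    }
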
static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
@@ -244,11 +310,99 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
break;
+ case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ ice_gstrings_priv_flags[i].name);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
default:
break;
}
}
+static int
+ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ bool led_active;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ led_active = true;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ led_active = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ice_aq_set_port_id_led(np->vsi->port_info, !led_active, NULL))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * ice_get_priv_flags - report device private flags
+ * @netdev: network interface device structure
+ *
+ * The string set count and the string set must stay in sync with the
+ * flags returned here; add a string to the ice_gstrings_priv_flags
+ * array for each new flag.
+ *
+ * Returns a u32 bitmap of flags.
+ */
+static u32 ice_get_priv_flags(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u32 i, ret_flags = 0;
+
+ for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
+ const struct ice_priv_flag *priv_flag;
+
+ priv_flag = &ice_gstrings_priv_flags[i];
+
+ if (test_bit(priv_flag->bitno, pf->flags))
+ ret_flags |= BIT(i);
+ }
+
+ return ret_flags;
+}
+
+/**
+ * ice_set_priv_flags - set private flags
+ * @netdev: network interface device structure
+ * @flags: bit flags to be set
+ */
+static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u32 i;
+
+ if (flags >= BIT(ICE_PRIV_FLAG_ARRAY_SIZE))
+ return -EINVAL;
+
+ for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
+ const struct ice_priv_flag *priv_flag;
+
+ priv_flag = &ice_gstrings_priv_flags[i];
+
+ if (flags & BIT(i))
+ set_bit(priv_flag->bitno, pf->flags);
+ else
+ clear_bit(priv_flag->bitno, pf->flags);
+ }
+
+ return 0;
+}
+
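ice_get_priv_flags() and ice_set_priv_flags() translate between ethtool's dense u32 bitmap, where bit i corresponds to string i, and the driver's sparse bit numbers in pf->flags. A standalone sketch of that translation, with a hypothetical table standing in for ice_gstrings_priv_flags:

#include <stdint.h>
#include <stdio.h>

struct priv_flag {
        const char *name;
        unsigned int bitno;     /* bit position in the driver's flag word */
};

/* Hypothetical entries; the real table is ice_gstrings_priv_flags. */
static const struct priv_flag tbl[] = {
        { "link-down-on-close", 5 },
        { "example-flag", 9 },
};
#define NFLAGS (sizeof(tbl) / sizeof(tbl[0]))

static uint32_t get_flags(uint64_t drv)
{
        uint32_t out = 0;

        for (unsigned int i = 0; i < NFLAGS; i++)
                if (drv & (1ULL << tbl[i].bitno))
                        out |= 1U << i;
        return out;
}

static int set_flags(uint64_t *drv, uint32_t in)
{
        if (in >= (1U << NFLAGS))       /* reject bits beyond known flags */
                return -1;
        for (unsigned int i = 0; i < NFLAGS; i++)
                if (in & (1U << i))
                        *drv |= 1ULL << tbl[i].bitno;
                else
                        *drv &= ~(1ULL << tbl[i].bitno);
        return 0;
}

int main(void)
{
        uint64_t drv = 0;

        set_flags(&drv, 0x1);                           /* enable first flag */
        printf("bitmap=0x%x\n", get_flags(drv));        /* 0x1 */
        return 0;
}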
static int ice_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
@@ -272,6 +426,8 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
* not safe.
*/
return ICE_ALL_STATS_LEN(netdev);
+ case ETH_SS_PRIV_FLAGS:
+ return ICE_PRIV_FLAG_ARRAY_SIZE;
default:
return -EOPNOTSUPP;
}
@@ -337,16 +493,20 @@ ice_get_ethtool_stats(struct net_device *netdev,
* @netdev: network interface device structure
* @ks: ethtool link ksettings struct to fill out
*/
-static void ice_phy_type_to_ethtool(struct net_device *netdev,
- struct ethtool_link_ksettings *ks)
+static void
+ice_phy_type_to_ethtool(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_link_status *hw_link_info;
+ bool need_add_adv_mode = false;
struct ice_vsi *vsi = np->vsi;
+ u64 phy_types_high;
u64 phy_types_low;
hw_link_info = &vsi->port_info->phy.link_info;
phy_types_low = vsi->port_info->phy.phy_type_low;
+ phy_types_high = vsi->port_info->phy.phy_type_high;
ethtool_link_ksettings_zero_link_mode(ks, supported);
ethtool_link_ksettings_zero_link_mode(ks, advertising);
@@ -495,6 +655,95 @@ static void ice_phy_type_to_ethtool(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, advertising,
40000baseLR4_Full);
}
+ if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseCR2_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 50000baseCR2_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseKR2_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 50000baseKR2_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR2 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR2 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_FR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseSR2_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 50000baseSR2_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2 ||
+ phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC ||
+ phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2 ||
+ phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC ||
+ phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseCR4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+ need_add_adv_mode = true;
+ }
+ if (need_add_adv_mode) {
+ need_add_adv_mode = false;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100000baseCR4_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseSR4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+ need_add_adv_mode = true;
+ }
+ if (need_add_adv_mode) {
+ need_add_adv_mode = false;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100000baseSR4_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_LR4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_DR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseLR4_ER4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+ need_add_adv_mode = true;
+ }
+ if (need_add_adv_mode) {
+ need_add_adv_mode = false;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100000baseLR4_ER4_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
+ phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseKR4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+ need_add_adv_mode = true;
+ }
+ if (need_add_adv_mode)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100000baseKR4_Full);
/* Autoneg PHY types */
if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
@@ -520,6 +769,24 @@ static void ice_phy_type_to_ethtool(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, advertising,
Autoneg);
}
+ if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Autoneg);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Autoneg);
+ }
}
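Each block in ice_phy_type_to_ethtool() maps a group of PHY type bits onto one representative ethtool link mode; a table of (low mask, high mask, mode) tuples can express the same relation more compactly. A sketch under that assumption, with hypothetical mask values:

#include <stdint.h>
#include <stdio.h>

struct phy_map {
        uint64_t low_mask;      /* any matching phy_type_low bit ...   */
        uint64_t high_mask;     /* ... or phy_type_high bit ...        */
        const char *mode;       /* ... enables this link mode          */
};

/* Hypothetical masks standing in for the ICE_PHY_TYPE_* groups. */
static const struct phy_map map[] = {
        { 0x00ff, 0x0, "50000baseCR2/Full" },
        { 0x0f00, 0x3, "100000baseCR4/Full" },
};

int main(void)
{
        uint64_t low = 0x0010, high = 0;

        for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++)
                if ((low & map[i].low_mask) || (high & map[i].high_mask))
                        printf("supported: %s\n", map[i].mode);
        return 0;
}

The driver keeps the open-coded form, plausibly because each block also gates the advertising side on hw_link_info->req_speeds.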
#define TEST_SET_BITS_TIMEOUT 50
@@ -531,13 +798,15 @@ static void ice_phy_type_to_ethtool(struct net_device *netdev,
* @ks: ethtool ksettings to fill in
* @netdev: network interface device structure
*/
-static void ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
- struct net_device *netdev)
+static void
+ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
+ struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ethtool_link_ksettings cap_ksettings;
struct ice_link_status *link_info;
struct ice_vsi *vsi = np->vsi;
+ bool unrecog_phy_high = false;
bool unrecog_phy_low = false;
link_info = &vsi->port_info->phy.link_info;
@@ -699,14 +968,116 @@ static void ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
ethtool_link_ksettings_add_link_mode(ks, advertising,
40000baseKR4_Full);
break;
+ case ICE_PHY_TYPE_LOW_50GBASE_CR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_CP:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseCR2_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 50000baseCR2_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_LAUI2:
+ case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI2:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR:
+ case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI1:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseCR2_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_50GBASE_KR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseKR2_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 50000baseKR2_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_50GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_FR:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseSR2_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_100GBASE_CR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseCR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100000baseCR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_CAUI4:
+ case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_AUI4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseCR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_100GBASE_CP2:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseCR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100000baseCR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_100GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR2:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseSR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_100GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_DR:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseLR4_ER4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_100GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseKR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100000baseKR4_Full);
+ break;
default:
unrecog_phy_low = true;
}
- if (unrecog_phy_low) {
+ switch (link_info->phy_type_high) {
+ case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseKR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100000baseKR4_Full);
+ break;
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2:
+ case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_100G_AUI2:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseCR4_Full);
+ break;
+ default:
+ unrecog_phy_high = true;
+ }
+
+ if (unrecog_phy_low && unrecog_phy_high) {
/* if we got here and link is up something bad is afoot */
- netdev_info(netdev, "WARNING: Unrecognized PHY_Low (0x%llx).\n",
+ netdev_info(netdev,
+ "WARNING: Unrecognized PHY_Low (0x%llx).\n",
(u64)link_info->phy_type_low);
+ netdev_info(netdev,
+ "WARNING: Unrecognized PHY_High (0x%llx).\n",
+ (u64)link_info->phy_type_high);
}
/* Now that we've worked out everything that could be supported by the
@@ -718,6 +1089,12 @@ static void ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
ethtool_intersect_link_masks(ks, &cap_ksettings);
switch (link_info->link_speed) {
+ case ICE_AQ_LINK_SPEED_100GB:
+ ks->base.speed = SPEED_100000;
+ break;
+ case ICE_AQ_LINK_SPEED_50GB:
+ ks->base.speed = SPEED_50000;
+ break;
case ICE_AQ_LINK_SPEED_40GB:
ks->base.speed = SPEED_40000;
break;
@@ -911,6 +1288,23 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
ethtool_link_ksettings_test_link_mode(ks, advertising,
40000baseKR4_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_40GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 50000baseCR2_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 50000baseKR2_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 50000baseSR2_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100000baseCR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100000baseSR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100000baseLR4_ER4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100000baseKR4_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_100GB;
return adv_link_speed;
}
@@ -981,8 +1375,9 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
*
* Set speed/duplex per media_types advertised/forced
*/
-static int ice_set_link_ksettings(struct net_device *netdev,
- const struct ethtool_link_ksettings *ks)
+static int
+ice_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *ks)
{
u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT, lport = 0;
struct ice_netdev_priv *np = netdev_priv(netdev);
@@ -994,6 +1389,7 @@ static int ice_set_link_ksettings(struct net_device *netdev,
struct ice_port_info *p;
u8 autoneg_changed = 0;
enum ice_status status;
+ u64 phy_type_high;
u64 phy_type_low;
int err = 0;
bool linkup;
@@ -1109,7 +1505,7 @@ static int ice_set_link_ksettings(struct net_device *netdev,
adv_link_speed = curr_link_speed;
/* Convert the advertise link speeds to their corresponded PHY_TYPE */
- ice_update_phy_type(&phy_type_low, adv_link_speed);
+ ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed);
if (!autoneg_changed && adv_link_speed == curr_link_speed) {
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -1128,7 +1524,9 @@ static int ice_set_link_ksettings(struct net_device *netdev,
/* set link and auto negotiation so changes take effect */
config.caps |= ICE_AQ_PHY_ENA_LINK;
- if (phy_type_low) {
+ if (phy_type_low || phy_type_high) {
+ config.phy_type_high = cpu_to_le64(phy_type_high) &
+ abilities->phy_type_high;
config.phy_type_low = cpu_to_le64(phy_type_low) &
abilities->phy_type_low;
} else {
@@ -1667,6 +2065,258 @@ static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
return 0;
}
+enum ice_container_type {
+ ICE_RX_CONTAINER,
+ ICE_TX_CONTAINER,
+};
+
+/**
+ * ice_get_rc_coalesce - get ITR values for specific ring container
+ * @ec: ethtool structure to fill with driver's coalesce settings
+ * @c_type: container type, RX or TX
+ * @rc: ring container that the ITR values will come from
+ *
+ * Query the device for ice_ring_container specific ITR values. This is
+ * done per ice_ring_container because each q_vector can have one or more
+ * rings, and all of those rings share the same ITR values.
+ *
+ * Returns 0 on success, negative otherwise.
+ */
+static int
+ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
+ struct ice_ring_container *rc)
+{
+ struct ice_pf *pf = rc->ring->vsi->back;
+
+ switch (c_type) {
+ case ICE_RX_CONTAINER:
+ ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
+ ec->rx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
+ break;
+ case ICE_TX_CONTAINER:
+ ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
+ ec->tx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
+ break;
+ default:
+ dev_dbg(&pf->pdev->dev, "Invalid c_type %d\n", c_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
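ITR_IS_DYNAMIC() and the ~ICE_ITR_DYNAMIC mask imply that a single u16 packs the microsecond setting in the low bits and an adaptive flag in one high bit. A standalone sketch of that encoding (flag position assumed, cf. ICE_ITR_DYNAMIC):

#include <stdint.h>
#include <stdio.h>

#define ITR_DYNAMIC 0x8000      /* assumed flag bit, cf. ICE_ITR_DYNAMIC */

int main(void)
{
        uint16_t itr_setting = 64 | ITR_DYNAMIC;        /* 64 us, adaptive */

        printf("adaptive=%d usecs=%u\n",
               !!(itr_setting & ITR_DYNAMIC),
               itr_setting & ~ITR_DYNAMIC);     /* adaptive=1 usecs=64 */
        return 0;
}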
+/**
+ * __ice_get_coalesce - get ITR/INTRL values for the device
+ * @netdev: pointer to the netdev associated with this query
+ * @ec: ethtool structure to fill with driver's coalesce settings
+ * @q_num: queue number to get the coalesce settings for
+ */
+static int
+__ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
+ int q_num)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ int tx = -EINVAL, rx = -EINVAL;
+ struct ice_vsi *vsi = np->vsi;
+
+ if (q_num < 0) {
+ rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ &vsi->rx_rings[0]->q_vector->rx);
+ tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ &vsi->tx_rings[0]->q_vector->tx);
+
+ goto update_coalesced_frames;
+ }
+
+ if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
+ rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ &vsi->rx_rings[q_num]->q_vector->rx);
+ tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ &vsi->tx_rings[q_num]->q_vector->tx);
+ } else if (q_num < vsi->num_rxq) {
+ rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ &vsi->rx_rings[q_num]->q_vector->rx);
+ } else if (q_num < vsi->num_txq) {
+ tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ &vsi->tx_rings[q_num]->q_vector->tx);
+ } else {
+ /* q_num is invalid for both Rx and Tx queues */
+ return -EINVAL;
+ }
+
+update_coalesced_frames:
+ /* either q_num is invalid for both Rx and Tx queues, or reading the
+ * coalesce settings failed for both
+ */
+ if (tx && rx)
+ return -EINVAL;
+
+ if (q_num < vsi->num_txq)
+ ec->tx_max_coalesced_frames_irq = vsi->work_lmt;
+
+ if (q_num < vsi->num_rxq)
+ ec->rx_max_coalesced_frames_irq = vsi->work_lmt;
+
+ return 0;
+}
+
+static int
+ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
+{
+ return __ice_get_coalesce(netdev, ec, -1);
+}
+
+static int ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
+ struct ethtool_coalesce *ec)
+{
+ return __ice_get_coalesce(netdev, ec, q_num);
+}
+
+/**
+ * ice_set_rc_coalesce - set ITR values for specific ring container
+ * @c_type: container type, RX or TX
+ * @ec: ethtool structure from user to update ITR settings
+ * @rc: ring container that the ITR values will come from
+ * @vsi: VSI associated to the ring container
+ *
+ * Set specific ITR values. This is done per ice_ring_container because each
+ * q_vector can have one or more rings, and all of those rings share the same
+ * ITR values.
+ *
+ * Returns 0 on success, negative otherwise.
+ */
+static int
+ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
+ struct ice_ring_container *rc, struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ u16 itr_setting;
+
+ if (!rc->ring)
+ return -EINVAL;
+
+ itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC;
+
+ switch (c_type) {
+ case ICE_RX_CONTAINER:
+ if (ec->rx_coalesce_usecs != itr_setting &&
+ ec->use_adaptive_rx_coalesce) {
+ netdev_info(vsi->netdev,
+ "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
+ return -EINVAL;
+ }
+
+ if (ec->rx_coalesce_usecs > ICE_ITR_MAX) {
+ netdev_info(vsi->netdev,
+ "Invalid value, rx-usecs range is 0-%d\n",
+ ICE_ITR_MAX);
+ return -EINVAL;
+ }
+
+ if (ec->use_adaptive_rx_coalesce) {
+ rc->itr_setting |= ICE_ITR_DYNAMIC;
+ } else {
+ rc->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
+ rc->target_itr = ITR_TO_REG(rc->itr_setting);
+ }
+ break;
+ case ICE_TX_CONTAINER:
+ if (ec->tx_coalesce_usecs != itr_setting &&
+ ec->use_adaptive_tx_coalesce) {
+ netdev_info(vsi->netdev,
+ "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
+ return -EINVAL;
+ }
+
+ if (ec->tx_coalesce_usecs > ICE_ITR_MAX) {
+ netdev_info(vsi->netdev,
+ "Invalid value, tx-usecs range is 0-%d\n",
+ ICE_ITR_MAX);
+ return -EINVAL;
+ }
+
+ if (ec->use_adaptive_tx_coalesce) {
+ rc->itr_setting |= ICE_ITR_DYNAMIC;
+ } else {
+ rc->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
+ rc->target_itr = ITR_TO_REG(rc->itr_setting);
+ }
+ break;
+ default:
+ dev_dbg(&pf->pdev->dev, "Invalid container type %d\n", c_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
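ice_set_rc_coalesce() applies three rules to a user-supplied value: it may not differ from the current setting while adaptive mode is on, it must be within range, and it is rounded to register granularity before being stored. A sketch of the range/alignment part (constants assumed, cf. ICE_ITR_MAX and ITR_REG_ALIGN):

#include <stdint.h>
#include <stdio.h>

#define ITR_MAX  8160   /* assumed, cf. ICE_ITR_MAX */
#define ITR_GRAN 2      /* assumed 2 us register granularity */

/* Validate and round down to register granularity. */
static int itr_align(unsigned int usecs, uint16_t *out)
{
        if (usecs > ITR_MAX)
                return -1;
        *out = usecs & ~(ITR_GRAN - 1);
        return 0;
}

int main(void)
{
        uint16_t reg;

        if (!itr_align(51, &reg))
                printf("51 us -> %u\n", reg);   /* 50 */
        if (itr_align(9000, &reg))
                printf("9000 us rejected\n");
        return 0;
}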
+static int
+__ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
+ int q_num)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ int rx = -EINVAL, tx = -EINVAL;
+ struct ice_vsi *vsi = np->vsi;
+
+ if (q_num < 0) {
+ int i;
+
+ ice_for_each_q_vector(vsi, i) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+ if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ &q_vector->rx, vsi) ||
+ ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ &q_vector->tx, vsi))
+ return -EINVAL;
+ }
+
+ goto set_work_lmt;
+ }
+
+ if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
+ rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ &vsi->rx_rings[q_num]->q_vector->rx,
+ vsi);
+ tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ &vsi->tx_rings[q_num]->q_vector->tx,
+ vsi);
+ } else if (q_num < vsi->num_rxq) {
+ rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ &vsi->rx_rings[q_num]->q_vector->rx,
+ vsi);
+ } else if (q_num < vsi->num_txq) {
+ tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ &vsi->tx_rings[q_num]->q_vector->tx,
+ vsi);
+ }
+
+ /* either q_num is invalid for both Rx and Tx queues, or setting the
+ * coalesce parameters failed for both
+ */
+ if (rx && tx)
+ return -EINVAL;
+
+set_work_lmt:
+ if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+ vsi->work_lmt = max(ec->tx_max_coalesced_frames_irq,
+ ec->rx_max_coalesced_frames_irq);
+
+ return 0;
+}
+
+static int
+ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
+{
+ return __ice_set_coalesce(netdev, ec, -1);
+}
+
+static int ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
+ struct ethtool_coalesce *ec)
+{
+ return __ice_set_coalesce(netdev, ec, q_num);
+}
+
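With these ops wired up, the settings are reachable from userspace. Assuming a reasonably recent ethtool (per-queue coalescing needs ethtool 4.5 or later), usage looks like:

# device-wide coalesce settings
ethtool -C eth0 adaptive-rx off rx-usecs 50
# queue 0 only
ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 50
ethtool --per-queue eth0 queue_mask 0x1 --show-coalesce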
static const struct ethtool_ops ice_ethtool_ops = {
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
@@ -1676,8 +2326,15 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_msglevel = ice_get_msglevel,
.set_msglevel = ice_set_msglevel,
.get_link = ethtool_op_get_link,
+ .get_eeprom_len = ice_get_eeprom_len,
+ .get_eeprom = ice_get_eeprom,
+ .get_coalesce = ice_get_coalesce,
+ .set_coalesce = ice_set_coalesce,
.get_strings = ice_get_strings,
+ .set_phys_id = ice_set_phys_id,
.get_ethtool_stats = ice_get_ethtool_stats,
+ .get_priv_flags = ice_get_priv_flags,
+ .set_priv_flags = ice_set_priv_flags,
.get_sset_count = ice_get_sset_count,
.get_rxnfc = ice_get_rxnfc,
.get_ringparam = ice_get_ringparam,
@@ -1689,6 +2346,9 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
.get_rxfh = ice_get_rxfh,
.set_rxfh = ice_set_rxfh,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_per_queue_coalesce = ice_get_per_q_coalesce,
+ .set_per_queue_coalesce = ice_set_per_q_coalesce,
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 5507928c8fbe..f9a38f2cd470 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -110,6 +110,7 @@
#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2)
#define GLINT_DYN_CTL_ITR_INDX_S 3
+#define GLINT_DYN_CTL_INTERVAL_S 5
#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25)
#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31)
#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index bb51dd7defb5..ef4c79b5aa32 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -346,6 +346,7 @@ enum ice_tx_desc_cmd_bits {
ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
};
@@ -488,5 +489,7 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
#define ICE_LINK_SPEED_20000MBPS 20000
#define ICE_LINK_SPEED_25000MBPS 25000
#define ICE_LINK_SPEED_40000MBPS 40000
+#define ICE_LINK_SPEED_50000MBPS 50000
+#define ICE_LINK_SPEED_100000MBPS 100000
#endif /* _ICE_LAN_TX_RX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 29b1dcfd4331..27c3760ae5cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -514,110 +514,89 @@ unlock_pf:
}
/**
- * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
- * @vsi: the VSI getting queues
+ * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
*
- * Return 0 on success and a negative value on error
+ * Return 0 on success and -ENOMEM when no space is left in the PF queue bitmap
*/
-static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)
+static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
- struct ice_pf *pf = vsi->back;
- int offset, ret = 0;
-
- mutex_lock(&pf->avail_q_mutex);
- /* look for contiguous block of queues for Tx */
- offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
- 0, vsi->alloc_txq, 0);
- if (offset < ICE_MAX_TXQS) {
- int i;
+ int offset, i;
- bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);
- for (i = 0; i < vsi->alloc_txq; i++)
- vsi->txq_map[i] = i + offset;
- } else {
- ret = -ENOMEM;
- vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;
+ mutex_lock(qs_cfg->qs_mutex);
+ offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
+ 0, qs_cfg->q_count, 0);
+ if (offset >= qs_cfg->pf_map_size) {
+ mutex_unlock(qs_cfg->qs_mutex);
+ return -ENOMEM;
}
- /* look for contiguous block of queues for Rx */
- offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,
- 0, vsi->alloc_rxq, 0);
- if (offset < ICE_MAX_RXQS) {
- int i;
-
- bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
- for (i = 0; i < vsi->alloc_rxq; i++)
- vsi->rxq_map[i] = i + offset;
- } else {
- ret = -ENOMEM;
- vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
- }
- mutex_unlock(&pf->avail_q_mutex);
+ bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
+ for (i = 0; i < qs_cfg->q_count; i++)
+ qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
+ mutex_unlock(qs_cfg->qs_mutex);
- return ret;
+ return 0;
}
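__ice_vsi_get_qs_contig() leans on bitmap_find_next_zero_area() to claim one contiguous run of queue IDs under the PF-wide mutex. The same search, reduced to a standalone 64-bit toy (no locking, not the kernel bitmap API):

#include <stdint.h>
#include <stdio.h>

#define MAP_SIZE 64

/* Find the first run of n zero bits in a small bitmap; -1 if none. */
static int find_zero_area(uint64_t map, unsigned int n)
{
        unsigned int start, i;

        for (start = 0; start + n <= MAP_SIZE; start++) {
                for (i = 0; i < n; i++)
                        if (map & (1ULL << (start + i)))
                                break;
                if (i == n)
                        return start;
        }
        return -1;
}

int main(void)
{
        uint64_t avail = 0x0fULL;       /* queues 0-3 already taken */
        int off = find_zero_area(avail, 4);

        if (off >= 0)
                printf("queues %d-%d assigned\n", off, off + 3); /* 4-7 */
        return 0;
}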
/**
- * ice_vsi_get_qs_scatter - Assign a scattered queues to VSI
- * @vsi: the VSI getting queues
+ * __ice_vsi_get_qs_sc - Assign scattered queues from the PF to a VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
*
- * Return 0 on success and a negative value on error
+ * Return 0 on success and -ENOMEM when no space is left in the PF queue bitmap
*/
-static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
+static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
- struct ice_pf *pf = vsi->back;
int i, index = 0;
- mutex_lock(&pf->avail_q_mutex);
-
- if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {
- for (i = 0; i < vsi->alloc_txq; i++) {
- index = find_next_zero_bit(pf->avail_txqs,
- ICE_MAX_TXQS, index);
- if (index < ICE_MAX_TXQS) {
- set_bit(index, pf->avail_txqs);
- vsi->txq_map[i] = index;
- } else {
- goto err_scatter_tx;
- }
- }
+ mutex_lock(qs_cfg->qs_mutex);
+ for (i = 0; i < qs_cfg->q_count; i++) {
+ index = find_next_zero_bit(qs_cfg->pf_map,
+ qs_cfg->pf_map_size, index);
+ if (index >= qs_cfg->pf_map_size)
+ goto err_scatter;
+ set_bit(index, qs_cfg->pf_map);
+ qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
}
+ mutex_unlock(qs_cfg->qs_mutex);
- if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
- for (i = 0; i < vsi->alloc_rxq; i++) {
- index = find_next_zero_bit(pf->avail_rxqs,
- ICE_MAX_RXQS, index);
- if (index < ICE_MAX_RXQS) {
- set_bit(index, pf->avail_rxqs);
- vsi->rxq_map[i] = index;
- } else {
- goto err_scatter_rx;
- }
- }
- }
-
- mutex_unlock(&pf->avail_q_mutex);
return 0;
-
-err_scatter_rx:
- /* unflag any queues we have grabbed (i is failed position) */
- for (index = 0; index < i; index++) {
- clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
- vsi->rxq_map[index] = 0;
- }
- i = vsi->alloc_txq;
-err_scatter_tx:
- /* i is either position of failed attempt or vsi->alloc_txq */
+err_scatter:
for (index = 0; index < i; index++) {
- clear_bit(vsi->txq_map[index], pf->avail_txqs);
- vsi->txq_map[index] = 0;
+ clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
+ qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
}
+ mutex_unlock(qs_cfg->qs_mutex);
- mutex_unlock(&pf->avail_q_mutex);
return -ENOMEM;
}
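The scatter path claims queue IDs one at a time, which is why the error label must unwind: i is the failed position, and everything claimed before it has to be released. The same claim-or-rollback shape as a standalone toy:

#include <stdint.h>
#include <stdio.h>

/* Grab n individual free bits; on failure release what was taken. */
static int grab_scattered(uint64_t *map, unsigned int n, int *out)
{
        unsigned int i, bit;

        for (i = 0; i < n; i++) {
                for (bit = 0; bit < 64; bit++)
                        if (!(*map & (1ULL << bit)))
                                break;
                if (bit == 64)
                        goto err;               /* i is the failed position */
                *map |= 1ULL << bit;
                out[i] = bit;
        }
        return 0;
err:
        while (i--)
                *map &= ~(1ULL << out[i]);      /* unwind partial grabs */
        return -1;
}

int main(void)
{
        uint64_t map = 0xaaaaaaaaaaaaaaaaULL;   /* every odd bit busy */
        int q[4];

        if (!grab_scattered(&map, 4, q))
                printf("%d %d %d %d\n", q[0], q[1], q[2], q[3]); /* 0 2 4 6 */
        return 0;
}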
/**
+ * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ *
+ * This is an internal function for assigning queues from the PF to VSI. It
+ * first tries to find contiguous space; if that fails, it falls back to the
+ * scatter approach.
+ *
+ * Return 0 on success and -ENOMEM when no space is left in the PF queue bitmap
+ */
+static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
+{
+ int ret = 0;
+
+ ret = __ice_vsi_get_qs_contig(qs_cfg);
+ if (ret) {
+ /* contig failed, so try with scatter approach */
+ qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
+ qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
+ qs_cfg->scatter_count);
+ ret = __ice_vsi_get_qs_sc(qs_cfg);
+ }
+ return ret;
+}
+
+/**
* ice_vsi_get_qs - Assign queues from PF to VSI
* @vsi: the VSI to assign queues to
*
@@ -625,25 +604,35 @@ err_scatter_tx:
*/
static int ice_vsi_get_qs(struct ice_vsi *vsi)
{
+ struct ice_pf *pf = vsi->back;
+ struct ice_qs_cfg tx_qs_cfg = {
+ .qs_mutex = &pf->avail_q_mutex,
+ .pf_map = pf->avail_txqs,
+ .pf_map_size = ICE_MAX_TXQS,
+ .q_count = vsi->alloc_txq,
+ .scatter_count = ICE_MAX_SCATTER_TXQS,
+ .vsi_map = vsi->txq_map,
+ .vsi_map_offset = 0,
+ .mapping_mode = vsi->tx_mapping_mode
+ };
+ struct ice_qs_cfg rx_qs_cfg = {
+ .qs_mutex = &pf->avail_q_mutex,
+ .pf_map = pf->avail_rxqs,
+ .pf_map_size = ICE_MAX_RXQS,
+ .q_count = vsi->alloc_rxq,
+ .scatter_count = ICE_MAX_SCATTER_RXQS,
+ .vsi_map = vsi->rxq_map,
+ .vsi_map_offset = 0,
+ .mapping_mode = vsi->rx_mapping_mode
+ };
int ret = 0;
vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
- /* NOTE: ice_vsi_get_qs_contig() will set the Rx/Tx mapping
- * modes individually to scatter if assigning contiguous queues
- * to Rx or Tx fails
- */
- ret = ice_vsi_get_qs_contig(vsi);
- if (ret < 0) {
- if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
- vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
- ICE_MAX_SCATTER_TXQS);
- if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
- vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
- ICE_MAX_SCATTER_RXQS);
- ret = ice_vsi_get_qs_scatter(vsi);
- }
+ ret = __ice_vsi_get_qs(&tx_qs_cfg);
+ if (!ret)
+ ret = __ice_vsi_get_qs(&rx_qs_cfg);
return ret;
}
@@ -1614,11 +1603,14 @@ setup_rings:
/**
* ice_vsi_cfg_txqs - Configure the VSI for Tx
* @vsi: the VSI being configured
+ * @rings: Tx ring array to be configured
+ * @offset: offset within vsi->txq_map
*
* Return 0 on success and a negative value on error
* Configure the Tx VSI for operation.
*/
-int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
+static int
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
struct ice_aqc_add_txqs_perq *txq;
@@ -1626,7 +1618,7 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
u8 num_q_grps, q_idx = 0;
enum ice_status status;
u16 buf_len, i, pf_q;
- int err = 0, tc = 0;
+ int err = 0, tc;
buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
@@ -1644,9 +1636,8 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
struct ice_tlan_ctx tlan_ctx = { 0 };
- pf_q = vsi->txq_map[q_idx];
- ice_setup_tx_ctx(vsi->tx_rings[q_idx], &tlan_ctx,
- pf_q);
+ pf_q = vsi->txq_map[q_idx + offset];
+ ice_setup_tx_ctx(rings[q_idx], &tlan_ctx, pf_q);
/* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
@@ -1655,7 +1646,7 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
/* init queue specific tail reg. It is referred as
* transmit comm scheduler queue doorbell.
*/
- vsi->tx_rings[q_idx]->tail =
+ rings[q_idx]->tail =
pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
num_q_grps, qg_buf, buf_len,
@@ -1674,7 +1665,7 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
*/
txq = &qg_buf->txqs[0];
if (pf_q == le16_to_cpu(txq->txq_id))
- vsi->tx_rings[q_idx]->txq_teid =
+ rings[q_idx]->txq_teid =
le32_to_cpu(txq->q_teid);
q_idx++;
@@ -1686,6 +1677,18 @@ err_cfg_txqs:
}
/**
+ * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
+{
+ return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0);
+}
+
+/**
* ice_intrl_usec_to_reg - convert interrupt rate limit to register value
* @intrl: interrupt rate limit in usecs
* @gran: interrupt rate limit granularity in usecs
@@ -1714,22 +1717,34 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
static void
ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
{
- u8 itr_gran = hw->itr_gran;
-
if (q_vector->num_ring_rx) {
struct ice_ring_container *rc = &q_vector->rx;
- rc->itr = ITR_TO_REG(ICE_DFLT_RX_ITR, itr_gran);
+ /* if this value is set then don't overwrite with default */
+ if (!rc->itr_setting)
+ rc->itr_setting = ICE_DFLT_RX_ITR;
+
+ rc->target_itr = ITR_TO_REG(rc->itr_setting);
+ rc->next_update = jiffies + 1;
+ rc->current_itr = rc->target_itr;
rc->latency_range = ICE_LOW_LATENCY;
- wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
+ wr32(hw, GLINT_ITR(rc->itr_idx, vector),
+ ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
}
if (q_vector->num_ring_tx) {
struct ice_ring_container *rc = &q_vector->tx;
- rc->itr = ITR_TO_REG(ICE_DFLT_TX_ITR, itr_gran);
+ /* if this value is set then don't overwrite with default */
+ if (!rc->itr_setting)
+ rc->itr_setting = ICE_DFLT_TX_ITR;
+
+ rc->target_itr = ITR_TO_REG(rc->itr_setting);
+ rc->next_update = jiffies + 1;
+ rc->current_itr = rc->target_itr;
rc->latency_range = ICE_LOW_LATENCY;
- wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
+ wr32(hw, GLINT_ITR(rc->itr_idx, vector),
+ ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
}
}
@@ -1897,9 +1912,12 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
* @vsi: the VSI being configured
* @rst_src: reset source
* @rel_vmvf_num: Relative id of VF/VM
+ * @rings: Tx ring array to be stopped
+ * @offset: offset within vsi->txq_map
*/
-int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num)
+static int
+ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num, struct ice_ring **rings, int offset)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
@@ -1927,19 +1945,18 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
ice_for_each_txq(vsi, i) {
u16 v_idx;
- if (!vsi->tx_rings || !vsi->tx_rings[i] ||
- !vsi->tx_rings[i]->q_vector) {
+ if (!rings || !rings[i] || !rings[i]->q_vector) {
err = -EINVAL;
goto err_out;
}
- q_ids[i] = vsi->txq_map[i];
- q_teids[i] = vsi->tx_rings[i]->txq_teid;
+ q_ids[i] = vsi->txq_map[i + offset];
+ q_teids[i] = rings[i]->txq_teid;
/* clear cause_ena bit for disabled queues */
- val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
+ val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx));
val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
+ wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val);
/* software is expected to wait for 100 ns */
ndelay(100);
@@ -1947,7 +1964,7 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
/* trigger a software interrupt for the vector associated to
* the queue to schedule NAPI handler
*/
- v_idx = vsi->tx_rings[i]->q_vector->v_idx;
+ v_idx = rings[i]->q_vector->v_idx;
wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
}
@@ -1977,6 +1994,19 @@ err_alloc_q_ids:
}
/**
+ * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
+ * @vsi: the VSI being configured
+ * @rst_src: reset source
+ * @rel_vmvf_num: Relative id of VF/VM
+ */
+int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi,
+ enum ice_disq_rst_src rst_src, u16 rel_vmvf_num)
+{
+ return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings,
+ 0);
+}
+
+/**
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
* @ena: set to true to enable VLAN pruning and false to disable it
@@ -2581,6 +2611,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
goto err_vectors;
ice_vsi_map_rings_to_vectors(vsi);
+ /* Do not exit if configuring RSS had an issue; at least
+ * receive traffic on the first queue. Hence there is no
+ * need to capture the return value.
+ */
+ if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags))
+ ice_vsi_cfg_rss_lut_key(vsi);
break;
case ICE_VSI_VF:
ret = ice_vsi_alloc_q_vectors(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 3831b4f0960a..7988a53729a9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -15,7 +15,7 @@ void ice_update_eth_stats(struct ice_vsi *vsi);
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
-int ice_vsi_cfg_txqs(struct ice_vsi *vsi);
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
@@ -31,7 +31,8 @@ int ice_vsi_start_rx_rings(struct ice_vsi *vsi);
int ice_vsi_stop_rx_rings(struct ice_vsi *vsi);
-int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+int
+ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num);
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 8725569d11f0..48f033928aa2 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1389,7 +1389,6 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
int oicr_idx, err = 0;
- u8 itr_gran;
u32 val;
if (!pf->int_name[0])
@@ -1453,10 +1452,8 @@ skip_req_irq:
PFINT_MBX_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_MBX_CTL, val);
- itr_gran = hw->itr_gran;
-
wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx),
- ITR_TO_REG(ICE_ITR_8K, itr_gran));
+ ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
ice_flush(hw);
ice_irq_dynamic_ena(hw, NULL, NULL);
@@ -1531,6 +1528,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
csumo_features = NETIF_F_RXCSUM |
NETIF_F_IP_CSUM |
+ NETIF_F_SCTP_CRC |
NETIF_F_IPV6_CSUM;
vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
@@ -1998,6 +1996,23 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/**
+ * ice_verify_itr_gran - verify driver's assumption of ITR granularity
+ * @pf: pointer to the PF structure
+ *
+ * No error is returned here because the driver can handle a different ITR
+ * granularity, but interrupt moderation will be inaccurate if the assumption
+ * does not hold. The assumption is made so we can use constants in the hot
+ * path instead of accessing structure members.
+ */
+static void ice_verify_itr_gran(struct ice_pf *pf)
+{
+ if (pf->hw.itr_gran != (ICE_ITR_GRAN_S << 1))
+ dev_warn(&pf->pdev->dev,
+ "%d ITR granularity assumption is invalid, actual ITR granularity is %d. Interrupt moderation will be inaccurate!\n",
+ (ICE_ITR_GRAN_S << 1), pf->hw.itr_gran);
+}
+
+/**
* ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
* @pf: pointer to the PF structure
*
@@ -2163,6 +2178,7 @@ static int ice_probe(struct pci_dev *pdev,
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
ice_verify_cacheline_size(pf);
+ ice_verify_itr_gran(pf);
return 0;
@@ -2422,7 +2438,8 @@ static void ice_set_rx_mode(struct net_device *netdev)
*/
static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
struct net_device *dev, const unsigned char *addr,
- u16 vid, u16 flags)
+ u16 vid, u16 flags,
+ struct netlink_ext_ack *extack)
{
int err;
@@ -2546,7 +2563,8 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
if (err)
return err;
}
- err = ice_vsi_cfg_txqs(vsi);
+
+ err = ice_vsi_cfg_lan_txqs(vsi);
if (!err)
err = ice_vsi_cfg_rxqs(vsi);
@@ -2945,12 +2963,91 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
}
/**
+ * ice_force_phys_link_state - Force the physical link state
+ * @vsi: VSI to force the physical link state to up/down
+ * @link_up: true/false indicates to set the physical link to up/down
+ *
+ * Force the physical link state by getting the current PHY capabilities from
+ * hardware and setting the PHY config based on the determined capabilities.
+ * If the link changes, a link event will be triggered because both the Enable
+ * Automatic Link Update and LESM Enable bits are set when setting the PHY
+ * capabilities.
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
+{
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_aqc_set_phy_cfg_data *cfg;
+ struct ice_port_info *pi;
+ struct device *dev;
+ int retcode;
+
+ if (!vsi || !vsi->port_info || !vsi->back)
+ return -EINVAL;
+ if (vsi->type != ICE_VSI_PF)
+ return 0;
+
+ dev = &vsi->back->pdev->dev;
+
+ pi = vsi->port_info;
+
+ pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ NULL);
+ if (retcode) {
+ dev_err(dev,
+ "Failed to get phy capabilities, VSI %d error %d\n",
+ vsi->vsi_num, retcode);
+ retcode = -EIO;
+ goto out;
+ }
+
+ /* No change in link */
+ if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
+ link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
+ goto out;
+
+ cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
+ if (!cfg) {
+ retcode = -ENOMEM;
+ goto out;
+ }
+
+ cfg->phy_type_low = pcaps->phy_type_low;
+ cfg->phy_type_high = pcaps->phy_type_high;
+ cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ cfg->low_power_ctrl = pcaps->low_power_ctrl;
+ cfg->eee_cap = pcaps->eee_cap;
+ cfg->eeer_value = pcaps->eeer_value;
+ cfg->link_fec_opt = pcaps->link_fec_options;
+ if (link_up)
+ cfg->caps |= ICE_AQ_PHY_ENA_LINK;
+ else
+ cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
+
+ retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL);
+ if (retcode) {
+ dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
+ vsi->vsi_num, retcode);
+ retcode = -EIO;
+ }
+
+ devm_kfree(dev, cfg);
+out:
+ devm_kfree(dev, pcaps);
+ return retcode;
+}
+
+/**
* ice_down - Shutdown the connection
* @vsi: The VSI being stopped
*/
int ice_down(struct ice_vsi *vsi)
{
- int i, tx_err, rx_err;
+ int i, tx_err, rx_err, link_err = 0;
/* Caller of this function is expected to set the
* vsi->state __ICE_DOWN bit
@@ -2961,7 +3058,8 @@ int ice_down(struct ice_vsi *vsi)
}
ice_vsi_dis_irq(vsi);
- tx_err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0);
+
+ tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
if (tx_err)
netdev_err(vsi->netdev,
"Failed stop Tx rings, VSI %d error %d\n",
@@ -2975,13 +3073,21 @@ int ice_down(struct ice_vsi *vsi)
ice_napi_disable_all(vsi);
+ if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
+ link_err = ice_force_phys_link_state(vsi, false);
+ if (link_err)
+ netdev_err(vsi->netdev,
+ "Failed to set physical link down, VSI %d error %d\n",
+ vsi->vsi_num, link_err);
+ }
+
ice_for_each_txq(vsi, i)
ice_clean_tx_ring(vsi->tx_rings[i]);
ice_for_each_rxq(vsi, i)
ice_clean_rx_ring(vsi->rx_rings[i]);
- if (tx_err || rx_err) {
+ if (tx_err || rx_err || link_err) {
netdev_err(vsi->netdev,
"Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
@@ -3641,7 +3747,8 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
*/
static int
ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
- u16 __always_unused flags, struct netlink_ext_ack *extack)
+ u16 __always_unused flags,
+ struct netlink_ext_ack __always_unused *extack)
{
struct ice_netdev_priv *np = netdev_priv(dev);
struct ice_pf *pf = np->vsi->back;
@@ -3814,8 +3921,14 @@ static int ice_open(struct net_device *netdev)
netif_carrier_off(netdev);
- err = ice_vsi_open(vsi);
+ err = ice_force_phys_link_state(vsi, true);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to set physical link up, error %d\n", err);
+ return err;
+ }
+ err = ice_vsi_open(vsi);
if (err)
netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 3274c543283c..ce64cecdae9c 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -125,6 +125,62 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
}
/**
+ * ice_read_sr_buf_aq - Reads Shadow RAM buf via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buf) from the SR using the ice_read_sr_aq
+ * method. The caller must already hold NVM ownership; it is acquired and
+ * released in ice_read_sr_buf().
+ */
+static enum ice_status
+ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
+{
+ enum ice_status status;
+ bool last_cmd = false;
+ u16 words_read = 0;
+ u16 i = 0;
+
+ do {
+ u16 read_size, off_w;
+
+ /* Calculate the number of words we should read in this step.
+ * It's not allowed to read more than one sector at a time or
+ * to cross sector boundaries.
+ */
+ off_w = offset % ICE_SR_SECTOR_SIZE_IN_WORDS;
+ read_size = off_w ?
+ min(*words,
+ (u16)(ICE_SR_SECTOR_SIZE_IN_WORDS - off_w)) :
+ min((*words - words_read), ICE_SR_SECTOR_SIZE_IN_WORDS);
+
+ /* Check if this is last command, if so set proper flag */
+ if ((words_read + read_size) >= *words)
+ last_cmd = true;
+
+ status = ice_read_sr_aq(hw, offset, read_size,
+ data + words_read, last_cmd);
+ if (status)
+ goto read_nvm_buf_aq_exit;
+
+ /* Increment counter for words already read and move offset to
+ * new read location
+ */
+ words_read += read_size;
+ offset += read_size;
+ } while (words_read < *words);
+
+ for (i = 0; i < *words; i++)
+ data[i] = le16_to_cpu(((__le16 *)data)[i]);
+
+read_nvm_buf_aq_exit:
+ *words = words_read;
+ return status;
+}
+
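The read_size computation reduces to "take what remains, capped at the distance to the next sector boundary"; the off_w zero/non-zero split only matters on the first iteration, after which the offset is sector aligned. A standalone check of the chunking (sector size assumed):

#include <stdio.h>

#define SECTOR_WORDS 0x800 /* assumed, cf. ICE_SR_SECTOR_SIZE_IN_WORDS */

/* Largest chunk readable at `offset` without crossing a sector edge. */
static unsigned int chunk_words(unsigned int offset, unsigned int remaining)
{
        unsigned int room = SECTOR_WORDS - (offset % SECTOR_WORDS);

        return remaining < room ? remaining : room;
}

int main(void)
{
        unsigned int offset = 0x7f0, words = 0x40, done = 0;

        while (done < words) {
                unsigned int n = chunk_words(offset, words - done);

                printf("read %#x words at %#x\n", n, offset);
                offset += n;    /* 0x10 at 0x7f0, then 0x30 at 0x800 */
                done += n;
        }
        return 0;
}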
+/**
* ice_acquire_nvm - Generic request for acquiring the NVM ownership
* @hw: pointer to the HW structure
* @access: NVM access type (read or write)
@@ -234,3 +290,28 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
return status;
}
+
+/**
+ * ice_read_sr_buf - Reads Shadow RAM buf and acquire lock if necessary
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buf) from the SR using the ice_read_sr_buf_aq
+ * method. The buffer read is preceded by taking NVM ownership and followed
+ * by its release.
+ */
+enum ice_status
+ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
+{
+ enum ice_status status;
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (!status) {
+ status = ice_read_sr_buf_aq(hw, offset, words, data);
+ ice_release_nvm(hw);
+ }
+
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index a1681853df2e..fb38e8be1e2e 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -85,37 +85,59 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
}
/**
- * ice_aq_query_sched_elems - query scheduler elements
+ * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
* @hw: pointer to the hw struct
- * @elems_req: number of elements to query
+ * @cmd_opc: cmd opcode
+ * @elems_req: number of elements to request
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
- * @elems_ret: returns total number of elements returned
+ * @elems_resp: returns the total number of elements in the response
* @cd: pointer to command details structure or NULL
*
- * Query scheduling elements (0x0404)
+ * This function sends a scheduling elements cmd (cmd_opc)
*/
static enum ice_status
-ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
- struct ice_aqc_get_elem *buf, u16 buf_size,
- u16 *elems_ret, struct ice_sq_cd *cd)
+ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
+ u16 elems_req, void *buf, u16 buf_size,
+ u16 *elems_resp, struct ice_sq_cd *cd)
{
- struct ice_aqc_get_cfg_elem *cmd;
+ struct ice_aqc_sched_elem_cmd *cmd;
struct ice_aq_desc desc;
enum ice_status status;
- cmd = &desc.params.get_update_elem;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sched_elems);
+ cmd = &desc.params.sched_elem_cmd;
+ ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
cmd->num_elem_req = cpu_to_le16(elems_req);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
- if (!status && elems_ret)
- *elems_ret = le16_to_cpu(cmd->num_elem_resp);
+ if (!status && elems_resp)
+ *elems_resp = le16_to_cpu(cmd->num_elem_resp);
return status;
}
/**
+ * ice_aq_query_sched_elems - query scheduler elements
+ * @hw: pointer to the hw struct
+ * @elems_req: number of elements to query
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @elems_ret: returns total number of elements returned
+ * @cd: pointer to command details structure or NULL
+ *
+ * Query scheduling elements (0x0404)
+ */
+static enum ice_status
+ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
+ struct ice_aqc_get_elem *buf, u16 buf_size,
+ u16 *elems_ret, struct ice_sq_cd *cd)
+{
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
+ elems_req, (void *)buf, buf_size,
+ elems_ret, cd);
+}
+
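This refactor folds several near-identical admin-queue callers into one transport function parameterized by opcode, leaving a thin wrapper per command. The shape, as a standalone sketch (opcode value taken from the kernel-doc above):

#include <stdint.h>
#include <stdio.h>

/* One generic sender plus thin per-opcode wrappers, mirroring
 * ice_aqc_send_sched_elem_cmd() and its callers.
 */
static int send_sched_elem_cmd(uint16_t opc, uint16_t req, uint16_t *resp)
{
        printf("AQ opcode 0x%04x, %u elements\n", opc, req);
        *resp = req;    /* pretend the firmware handled all of them */
        return 0;
}

static int query_sched_elems(uint16_t req, uint16_t *resp)
{
        return send_sched_elem_cmd(0x0404, req, resp);  /* query elements */
}

int main(void)
{
        uint16_t done;

        return query_sched_elems(3, &done);
}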
+/**
* ice_sched_query_elem - query element information from hw
* @hw: pointer to the hw struct
* @node_teid: node teid to be queried
@@ -218,20 +240,9 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_delete_elem *buf, u16 buf_size,
u16 *grps_del, struct ice_sq_cd *cd)
{
- struct ice_aqc_add_move_delete_elem *cmd;
- struct ice_aq_desc desc;
- enum ice_status status;
-
- cmd = &desc.params.add_move_delete_elem;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_delete_sched_elems);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- cmd->num_grps_req = cpu_to_le16(grps_req);
-
- status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
- if (!status && grps_del)
- *grps_del = le16_to_cpu(cmd->num_grps_updated);
-
- return status;
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
+ grps_req, (void *)buf, buf_size,
+ grps_del, cd);
}
/**
@@ -442,52 +453,9 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_add_elem *buf, u16 buf_size,
u16 *grps_added, struct ice_sq_cd *cd)
{
- struct ice_aqc_add_move_delete_elem *cmd;
- struct ice_aq_desc desc;
- enum ice_status status;
-
- cmd = &desc.params.add_move_delete_elem;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_sched_elems);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
- cmd->num_grps_req = cpu_to_le16(grps_req);
- status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
- if (!status && grps_added)
- *grps_added = le16_to_cpu(cmd->num_grps_updated);
-
- return status;
-}
-
-/**
- * ice_suspend_resume_elems - suspend/resume scheduler elements
- * @hw: pointer to the hw struct
- * @elems_req: number of elements to suspend
- * @buf: pointer to buffer
- * @buf_size: buffer size in bytes
- * @elems_ret: returns total number of elements suspended
- * @cd: pointer to command details structure or NULL
- * @cmd_code: command code for suspend or resume
- *
- * suspend/resume scheduler elements
- */
-static enum ice_status
-ice_suspend_resume_elems(struct ice_hw *hw, u16 elems_req,
- struct ice_aqc_suspend_resume_elem *buf, u16 buf_size,
- u16 *elems_ret, struct ice_sq_cd *cd,
- enum ice_adminq_opc cmd_code)
-{
- struct ice_aqc_get_cfg_elem *cmd;
- struct ice_aq_desc desc;
- enum ice_status status;
-
- cmd = &desc.params.get_update_elem;
- ice_fill_dflt_direct_cmd_desc(&desc, cmd_code);
- cmd->num_elem_req = cpu_to_le16(elems_req);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
- if (!status && elems_ret)
- *elems_ret = le16_to_cpu(cmd->num_elem_resp);
- return status;
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
+ grps_req, (void *)buf, buf_size,
+ grps_added, cd);
}
/**
@@ -506,8 +474,9 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_suspend_resume_elem *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
- return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret,
- cd, ice_aqc_opc_suspend_sched_elems);
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
+ elems_req, (void *)buf, buf_size,
+ elems_ret, cd);
}
/**
@@ -526,8 +495,9 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_suspend_resume_elem *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
- return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret,
- cd, ice_aqc_opc_resume_sched_elems);
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
+ elems_req, (void *)buf, buf_size,
+ elems_ret, cd);
}
/**
@@ -591,23 +561,18 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
}
/**
- * ice_sched_clear_tx_topo - clears the schduler tree nodes
- * @pi: port information structure
+ * ice_sched_clear_agg - clears the agg related information
+ * @hw: pointer to the hardware structure
*
- * This function removes all the nodes from HW as well as from SW DB.
+ * This function removes the aggregator list and frees up aggregator-related
+ * memory previously allocated.
*/
-static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
+void ice_sched_clear_agg(struct ice_hw *hw)
{
struct ice_sched_agg_info *agg_info;
struct ice_sched_agg_info *atmp;
- struct ice_hw *hw;
-
- if (!pi)
- return;
-
- hw = pi->hw;
- list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) {
+ list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) {
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_sched_agg_vsi_info *vtmp;
@@ -616,8 +581,21 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
list_del(&agg_vsi_info->list_entry);
devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
}
+ list_del(&agg_info->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), agg_info);
}
+}
+/**
+ * ice_sched_clear_tx_topo - clears the scheduler tree nodes
+ * @pi: port information structure
+ *
+ * This function removes all the nodes from HW as well as from SW DB.
+ */
+static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
+{
+ if (!pi)
+ return;
if (pi->root) {
ice_free_sched_node(pi, pi->root);
pi->root = NULL;
@@ -1035,7 +1013,6 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
/* initialize the port for handling the scheduler tree */
pi->port_state = ICE_SCHED_PORT_STATE_READY;
mutex_init(&pi->sched_lock);
- INIT_LIST_HEAD(&pi->agg_list);
err_init_port:
if (status && pi->root) {
@@ -1618,7 +1595,8 @@ ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
struct ice_sched_agg_info *agg_info;
struct ice_sched_agg_info *atmp;
- list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) {
+ list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list,
+ list_entry) {
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_sched_agg_vsi_info *vtmp;
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index da5b4c166da8..bee8221ad146 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -28,6 +28,8 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_clear_port(struct ice_port_info *pi);
void ice_sched_cleanup_all(struct ice_hw *hw);
+void ice_sched_clear_agg(struct ice_hw *hw);
+
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
enum ice_status
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 533b989a23e1..d2db0d04e117 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -85,6 +85,12 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
case ICE_AQ_LINK_SPEED_40GB:
speed = ICE_LINK_SPEED_40000MBPS;
break;
+ case ICE_AQ_LINK_SPEED_50GB:
+ speed = ICE_LINK_SPEED_50000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_100GB:
+ speed = ICE_LINK_SPEED_100000MBPS;
+ break;
default:
speed = ICE_LINK_SPEED_UNKNOWN;
break;
@@ -116,6 +122,9 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
break;
case ICE_AQ_LINK_SPEED_40GB:
/* fall through */
+ case ICE_AQ_LINK_SPEED_50GB:
+ /* fall through */
+ case ICE_AQ_LINK_SPEED_100GB:
speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
break;
default:
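The two hunks above pair up: the first teaches the AQ-to-driver speed mapping about 50G and 100G links, while the second handles VFs that only understand the legacy virtchnl speed enum, whose largest member is VIRTCHNL_LINK_SPEED_40GB; anything faster is therefore clamped to 40G for such VFs. A hedged sketch of just the clamping behaviour (the helper is illustrative; the constants mirror the driver):

	static u32 legacy_vf_speed(u16 link_speed)
	{
		switch (link_speed) {
		case ICE_AQ_LINK_SPEED_40GB:
		case ICE_AQ_LINK_SPEED_50GB:
		case ICE_AQ_LINK_SPEED_100GB:
			/* legacy virtchnl cannot express >40G; clamp */
			return (u32)VIRTCHNL_LINK_SPEED_40GB;
		default:
			return (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
		}
	}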
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 49fc38094185..2357fcac996b 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1053,6 +1053,69 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
}
/**
+ * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
+ * @itr_idx: interrupt throttling index
+ * @reg_itr: interrupt throttling value adjusted based on ITR granularity
+ */
+static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
+{
+ return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
+ (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
+ (reg_itr << GLINT_DYN_CTL_INTERVAL_S);
+}
+
+/**
+ * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
+ * @vsi: the VSI associated with the q_vector
+ * @q_vector: q_vector for which ITR is being updated and interrupt enabled
+ */
+static void
+ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_ring_container *rc;
+ u32 itr_val;
+
+ /* This block of logic allows us to get away with only updating
+ * one ITR value with each interrupt. The idea is to perform a
+ * pseudo-lazy update with the following criteria.
+ *
+ * 1. Rx is given higher priority than Tx if both are in the same state.
+ * 2. If we must reduce an ITR, that reduction is given highest priority.
+ * 3. We then give priority to increasing ITR based on the amount of change.
+ */
+ if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
+ rc = &q_vector->rx;
+ /* Rx ITR needs to be reduced, this is highest priority */
+ itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
+ rc->current_itr = rc->target_itr;
+ } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
+ ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
+ (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
+ rc = &q_vector->tx;
+ /* Tx ITR needs to be reduced; this is second priority.
+ * Tx ITR needs to be increased more than Rx; fourth priority.
+ */
+ itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
+ rc->current_itr = rc->target_itr;
+ } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
+ rc = &q_vector->rx;
+ /* Rx ITR needs to be increased, third priority */
+ itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
+ rc->current_itr = rc->target_itr;
+ } else {
+ /* Still have to re-enable the interrupts */
+ itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
+ }
+
+ if (!test_bit(__ICE_DOWN, vsi->state)) {
+ int vector = vsi->hw_base_vector + q_vector->v_idx;
+
+ wr32(hw, GLINT_DYN_CTL(vector), itr_val);
+ }
+}
+
+/**
* ice_napi_poll - NAPI polling Rx/Tx cleanup routine
* @napi: napi struct with our devices info in it
* @budget: amount of work driver is allowed to do this pass, in packets
@@ -1108,7 +1171,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
*/
if (likely(napi_complete_done(napi, work_done)))
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
+ ice_update_ena_itr(vsi, q_vector);
return min(work_done, budget - 1);
}
@@ -1402,6 +1465,12 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
break;
case IPPROTO_SCTP:
+ /* enable SCTP checksum offload */
+ cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
+ l4_len = sizeof(struct sctphdr) >> 2;
+ offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
+ break;
default:
if (first->tx_flags & ICE_TX_FLAGS_TSO)
return -1;
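The value built by ice_buildreg_itr() packs four things into GLINT_DYN_CTL: the interrupt-enable bit, the clear-PBA bit, an ITR index selecting which per-vector ITR register to update, and the new interval. In the no-change branch the driver passes ICE_ITR_NONE, which (as in other Intel drivers) selects an index the hardware treats as "update nothing", so the write merely re-enables the interrupt. The priority rules from the comment block can be lifted out and unit-tested in plain C; the following is a standalone sketch, not driver code:

	#include <stdint.h>

	struct rc { uint16_t target, current; };

	/* returns which container to program: 0 = none, 1 = Rx, 2 = Tx */
	static int pick_itr(const struct rc *rx, const struct rc *tx)
	{
		if (rx->target < rx->current)
			return 1;	/* Rx reduction: highest priority */
		if (tx->target < tx->current ||
		    (rx->target - rx->current) < (tx->target - tx->current))
			return 2;	/* Tx reduction, or Tx grows more */
		if (rx->current != rx->target)
			return 1;	/* Rx increase: third priority */
		return 0;		/* nothing to update */
	}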
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 75d0eaf6c9dd..fc358ea81816 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -116,16 +116,17 @@ enum ice_rx_dtype {
/* indices into GLINT_ITR registers */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
-#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
-#define ICE_ITR_8K 125
+#define ICE_ITR_8K 124
#define ICE_ITR_20K 50
-#define ICE_DFLT_TX_ITR ICE_ITR_20K
-#define ICE_DFLT_RX_ITR ICE_ITR_20K
-/* apply ITR granularity translation to program the register. itr_gran is either
- * 2 or 4 usecs so we need to divide by 2 first then shift by that value
- */
-#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> \
- ((itr_gran) / 2))
+#define ICE_ITR_MAX 8160
+#define ICE_DFLT_TX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC)
+#define ICE_DFLT_RX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC)
+#define ICE_ITR_DYNAMIC 0x8000 /* used as flag for itr_setting */
+#define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC))
+#define ITR_TO_REG(setting) ((setting) & ~ICE_ITR_DYNAMIC)
+#define ICE_ITR_GRAN_S 1 /* Assume ITR granularity is 2us */
+#define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */
+#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~ICE_ITR_MASK)
#define ICE_DFLT_INTRL 0
@@ -180,13 +181,20 @@ enum ice_latency_range {
};
struct ice_ring_container {
- /* array of pointers to rings */
+ /* head of linked-list of rings */
struct ice_ring *ring;
+ unsigned long next_update; /* jiffies value of next queue update */
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_pkts; /* total packets processed this int */
enum ice_latency_range latency_range;
- int itr_idx; /* index in the interrupt vector */
- u16 itr;
+ int itr_idx; /* index in the interrupt vector */
+ u16 target_itr; /* value in usecs divided by the hw->itr_gran */
+ u16 current_itr; /* value in usecs divided by the hw->itr_gran */
+ /* high bit set means dynamic ITR; the rest stores the user-readable
+ * ITR value in usecs, which must be converted before being programmed
+ * to a register.
+ */
+ u16 itr_setting;
};
/* iterator for handling rings in ring container */
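The reworked macros encode two things in one u16 itr_setting: bit 15 is the dynamic-moderation flag and the low bits hold the user-visible interval in usecs. ITR_TO_REG() strips the flag, and ITR_REG_ALIGN() rounds the value up to the 2-usec register granularity via the kernel's __ALIGN_MASK(); that alignment is presumably also why ICE_ITR_8K drops from 125 to 124, since 125 is not an even register value. A self-contained sketch (userspace C, with the kernel helper reproduced inline):

	#include <assert.h>
	#include <stdint.h>

	#define ITR_DYNAMIC	0x8000
	#define ITR_MASK	0x1FFE	/* 2-usec register alignment */
	#define ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))

	int main(void)
	{
		uint16_t setting = 50 | ITR_DYNAMIC;	/* 50 usecs, dynamic */

		assert(setting & ITR_DYNAMIC);		/* ITR_IS_DYNAMIC */
		assert((setting & ~ITR_DYNAMIC) == 50);	/* ITR_TO_REG */
		/* ITR_REG_ALIGN: odd usec values round up to even ones */
		assert(ALIGN_MASK(51, ~ITR_MASK) == 52);
		assert(ALIGN_MASK(50, ~ITR_MASK) == 50);
		return 0;
	}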
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 0ea428104215..17086d5b5c33 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -90,6 +90,7 @@ enum ice_vsi_type {
struct ice_link_status {
/* Refer to ice_aq_phy_type for bits definition */
u64 phy_type_low;
+ u64 phy_type_high;
u16 max_frame_size;
u16 link_speed;
u16 req_speeds;
@@ -118,6 +119,7 @@ struct ice_phy_info {
struct ice_link_status link_info;
struct ice_link_status link_info_old;
u64 phy_type_low;
+ u64 phy_type_high;
enum ice_media_type media_type;
u8 get_link_info;
};
@@ -272,7 +274,6 @@ struct ice_port_info {
struct ice_mac_info mac;
struct ice_phy_info phy;
struct mutex sched_lock; /* protect access to TXSched tree */
- struct list_head agg_list; /* lists all aggregator */
u8 lport;
#define ICE_LPORT_MASK 0xff
u8 is_vf;
@@ -326,6 +327,7 @@ struct ice_hw {
u8 max_cgds;
u8 sw_entry_point_layer;
u16 max_children[ICE_AQC_TOPO_MAX_LEVEL_NUM];
+ struct list_head agg_list; /* list of all aggregators */
struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI];
u8 evb_veb; /* true for VEB, false for VEPA */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 05ff4f910649..80b50e67cbef 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -224,13 +224,15 @@ void ice_free_vfs(struct ice_pf *pf)
/* Avoid wait time by stopping all VFs at the same time */
for (i = 0; i < pf->num_alloc_vfs; i++) {
+ struct ice_vsi *vsi;
+
if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
continue;
+ vsi = pf->vsi[pf->vf[i].lan_vsi_idx];
/* stop rings without wait time */
- ice_vsi_stop_tx_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
- ICE_NO_RESET, i);
- ice_vsi_stop_rx_rings(pf->vsi[pf->vf[i].lan_vsi_idx]);
+ ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, i);
+ ice_vsi_stop_rx_rings(vsi);
clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
}
@@ -831,6 +833,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
struct ice_pf *pf = vf->pf;
struct ice_hw *hw = &pf->hw;
+ struct ice_vsi *vsi;
bool rsd = false;
u32 reg;
int i;
@@ -843,17 +846,18 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
ice_trigger_vf_reset(vf, is_vflr);
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
- ice_vsi_stop_tx_rings(pf->vsi[vf->lan_vsi_idx], ICE_VF_RESET,
- vf->vf_id);
- ice_vsi_stop_rx_rings(pf->vsi[vf->lan_vsi_idx]);
+ ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
+ ice_vsi_stop_rx_rings(vsi);
clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
} else {
/* Call Disable LAN Tx queue AQ call even when queues are not
* enabled. This is needed for successful completion of VFR
*/
- ice_dis_vsi_txq(pf->vsi[vf->lan_vsi_idx]->port_info, 0,
- NULL, NULL, ICE_VF_RESET, vf->vf_id, NULL);
+ ice_dis_vsi_txq(vsi->port_info, 0, NULL, NULL, ICE_VF_RESET,
+ vf->vf_id, NULL);
}
/* poll VPGEN_VFRSTAT reg to make sure
@@ -1614,7 +1618,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- if (ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) {
+ if (ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) {
dev_err(&vsi->back->pdev->dev,
"Failed to stop tx rings on VSI %d\n",
vsi->vsi_num);
@@ -1784,7 +1788,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vsi->num_txq = qci->num_queue_pairs;
vsi->num_rxq = qci->num_queue_pairs;
- if (!ice_vsi_cfg_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi))
+ if (!ice_vsi_cfg_lan_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi))
aq_ret = 0;
else
aq_ret = ICE_ERR_PARAM;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 7137e7f9c7f3..dfa357b1a9d6 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2486,7 +2486,8 @@ static int igb_set_features(struct net_device *netdev,
static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags)
+ u16 flags,
+ struct netlink_ext_ack *extack)
{
/* guarantee we can provide a unique filter for the unicast address */
if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index daff8183534b..b53087a980ef 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -9910,7 +9910,8 @@ static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags)
+ u16 flags,
+ struct netlink_ext_ack *extack)
{
/* guarantee we can provide a unique filter for the unicast address */
if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index a5ab6f3403ae..763ee5281177 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2034,10 +2034,9 @@ static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
ctxbi->len,
PCI_DMA_TODEVICE);
- ctxbi->mapping = 0;
- ctxbi->len = 0;
+ ctxbi->mapping = 0;
+ ctxbi->len = 0;
}
-
}
static int
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e65bc3c95630..c19e74e6ac94 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -3274,7 +3274,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
link_state, slave, port);
return -EINVAL;
- };
+ }
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
s_info->link_state = link_state;
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 2df92dbd38e1..a5be27772b8e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -100,7 +100,7 @@ static void eq_set_ci(struct mlx4_eq *eq, int req_not)
req_not << 31),
eq->doorbell);
/* We still want ordering, just not swabbing, so add a barrier */
- mb();
+ wmb();
}
static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor,
@@ -558,6 +558,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
__func__, be32_to_cpu(eqe->event.srq.srqn),
eq->eqn);
+ /* fall through */
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
if (mlx4_is_master(dev)) {
/* forward only to slave owning the SRQ */
@@ -820,7 +821,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
!!(eqe->owner & 0x80) ^
!!(eq->cons_index & eq->nent) ? "HW" : "SW");
break;
- };
+ }
++eq->cons_index;
eqes_found = 1;
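Several small fixes land in eq.c: stray semicolons after switch blocks are dropped, an explicit fall-through annotation documents the SRQ_LIMIT to SRQ_CATAS_ERROR cascade, and the doorbell barrier is relaxed from mb() to wmb(). The relaxation is safe because __raw_writel() performs neither byte swapping nor an implied barrier, and only store-store ordering matters on this path; a full barrier that also orders loads buys nothing. The pattern, shown for context (this mirrors the driver function; kernel context assumed):

	static void eq_set_ci_sketch(struct mlx4_eq *eq, int req_not)
	{
		__raw_writel((__force u32)cpu_to_be32((eq->cons_index & 0xffffff) |
						      req_not << 31),
			     eq->doorbell);
		/* order the doorbell store against later stores only */
		wmb();
	}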
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index 424457ff9759..8ecac81a385d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -258,6 +258,8 @@ const char *parse_fs_dst(struct trace_seq *p,
return ret;
}
+EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_add_ft);
+EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_del_ft);
EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_add_fg);
EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_del_fg);
EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_set_fte);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index d027ce00c8ce..a4cf123e3f17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -61,6 +61,41 @@ const char *parse_fs_dst(struct trace_seq *p,
const struct mlx5_flow_destination *dst,
u32 counter_id);
+TRACE_EVENT(mlx5_fs_add_ft,
+ TP_PROTO(const struct mlx5_flow_table *ft),
+ TP_ARGS(ft),
+ TP_STRUCT__entry(
+ __field(const struct mlx5_flow_table *, ft)
+ __field(u32, id)
+ __field(u32, level)
+ __field(u32, type)
+ ),
+ TP_fast_assign(
+ __entry->ft = ft;
+ __entry->id = ft->id;
+ __entry->level = ft->level;
+ __entry->type = ft->type;
+ ),
+ TP_printk("ft=%p id=%u level=%u type=%u \n",
+ __entry->ft, __entry->id, __entry->level, __entry->type)
+ );
+
+TRACE_EVENT(mlx5_fs_del_ft,
+ TP_PROTO(const struct mlx5_flow_table *ft),
+ TP_ARGS(ft),
+ TP_STRUCT__entry(
+ __field(const struct mlx5_flow_table *, ft)
+ __field(u32, id)
+ ),
+ TP_fast_assign(
+ __entry->ft = ft;
+ __entry->id = ft->id;
+ ),
+ TP_printk("ft=%p id=%u\n",
+ __entry->ft, __entry->id)
+ );
+
TRACE_EVENT(mlx5_fs_add_fg,
TP_PROTO(const struct mlx5_flow_group *fg),
TP_ARGS(fg),
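The two new TRACE_EVENTs mirror the existing flow-group/FTE events; TP_fast_assign() snapshots id/level/type at emission time, so the trace record stays meaningful even after the flow table itself is freed. The call sites are added in fs_core.c further below. A minimal sketch of emission (the trace_<name>() wrapper is generated by TRACE_EVENT() and compiles to a no-op unless the event is enabled at runtime):

	/* emitted from the create/destroy paths in fs_core.c */
	trace_mlx5_fs_add_ft(ft);
	trace_mlx5_fs_del_ft(ft);

At runtime the events would typically appear under the usual tracefs layout, e.g. events/mlx5/mlx5_fs_add_ft/, and can be toggled via their enable attribute.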
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 8fa8fdd30b85..6dd74ef69389 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -76,15 +76,14 @@ struct page_pool;
#define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define MLX5E_RX_MAX_HEAD (256)
+
#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
-#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
-#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
-#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
- (cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
- MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))
+#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
+ MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
#define MLX5_MPWRQ_LOG_WQE_SZ 18
#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
@@ -119,8 +118,6 @@ struct page_pool;
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
-#define MLX5E_RX_MAX_HEAD (256)
-
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT 32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
@@ -309,16 +306,18 @@ struct mlx5e_cq {
struct mlx5_core_cq mcq;
struct mlx5e_channel *channel;
+ /* control */
+ struct mlx5_core_dev *mdev;
+ struct mlx5_wq_ctrl wq_ctrl;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_cq_decomp {
/* cqe decompression */
struct mlx5_cqe64 title;
struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
u8 mini_arr_idx;
- u16 decmprs_left;
- u16 decmprs_wqe_counter;
-
- /* control */
- struct mlx5_core_dev *mdev;
- struct mlx5_wq_ctrl wq_ctrl;
+ u16 left;
+ u16 wqe_counter;
} ____cacheline_aligned_in_smp;
struct mlx5e_tx_wqe_info {
@@ -581,6 +580,7 @@ struct mlx5e_rq {
struct net_device *netdev;
struct mlx5e_rq_stats *stats;
struct mlx5e_cq cq;
+ struct mlx5e_cq_decomp cqd;
struct mlx5e_page_cache page_cache;
struct hwtstamp_config *tstamp;
struct mlx5_clock *clock;
@@ -638,6 +638,7 @@ struct mlx5e_channel {
struct hwtstamp_config *tstamp;
int ix;
int cpu;
+ cpumask_var_t xps_cpumask;
};
struct mlx5e_channels {
@@ -803,6 +804,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
void mlx5e_update_stats(struct mlx5e_priv *priv);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
+void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 01819e5c9975..099d307e6f25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -171,8 +171,7 @@ static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
return order_base_2(mlx5e_rx_get_linear_frag_sz(params));
- return MLX5E_MPWQE_STRIDE_SZ(mdev,
- MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
+ return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}
static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
@@ -1950,6 +1949,29 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return err;
}
+static int mlx5e_alloc_xps_cpumask(struct mlx5e_channel *c,
+ struct mlx5e_params *params)
+{
+ int num_comp_vectors = mlx5_comp_vectors_count(c->mdev);
+ int irq;
+
+ if (!zalloc_cpumask_var(&c->xps_cpumask, GFP_KERNEL))
+ return -ENOMEM;
+
+ for (irq = c->ix; irq < num_comp_vectors; irq += params->num_channels) {
+ int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(c->mdev, irq));
+
+ cpumask_set_cpu(cpu, c->xps_cpumask);
+ }
+
+ return 0;
+}
+
+static void mlx5e_free_xps_cpumask(struct mlx5e_channel *c)
+{
+ free_cpumask_var(c->xps_cpumask);
+}
+
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam,
@@ -1982,9 +2004,12 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->num_tc = params->num_tc;
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix].ch;
-
c->irq_desc = irq_to_desc(irq);
+ err = mlx5e_alloc_xps_cpumask(c, params);
+ if (err)
+ goto err_free_channel;
+
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
@@ -2067,6 +2092,9 @@ err_close_icosq_cq:
err_napi_del:
netif_napi_del(&c->napi);
+ mlx5e_free_xps_cpumask(c);
+
+err_free_channel:
kvfree(c);
return err;
@@ -2079,7 +2107,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->sq[tc]);
mlx5e_activate_rq(&c->rq);
- netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
+ netif_set_xps_queue(c->netdev, c->xps_cpumask, c->ix);
}
static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -2107,6 +2135,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
mlx5e_close_tx_cqs(c);
mlx5e_close_cq(&c->icosq.cq);
netif_napi_del(&c->napi);
+ mlx5e_free_xps_cpumask(c);
kvfree(c);
}
@@ -3492,11 +3521,32 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
+void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
+{
+ int i;
+
+ for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) {
+ struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
+ struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
+ int j;
+
+ s->rx_packets += rq_stats->packets;
+ s->rx_bytes += rq_stats->bytes;
+
+ for (j = 0; j < priv->max_opened_tc; j++) {
+ struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
+
+ s->tx_packets += sq_stats->packets;
+ s->tx_bytes += sq_stats->bytes;
+ s->tx_dropped += sq_stats->dropped;
+ }
+ }
+}
+
void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_sw_stats *sstats = &priv->stats.sw;
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
@@ -3511,12 +3561,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
} else {
- mlx5e_grp_sw_update_stats(priv);
- stats->rx_packets = sstats->rx_packets;
- stats->rx_bytes = sstats->rx_bytes;
- stats->tx_packets = sstats->tx_packets;
- stats->tx_bytes = sstats->tx_bytes;
- stats->tx_dropped = sstats->tx_queue_dropped;
+ mlx5e_fold_sw_stats64(priv, stats);
}
stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index f2573c2d2b5c..5d2e0c2f6624 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -162,27 +162,16 @@ static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
- struct mlx5e_rq_stats *rq_stats;
- struct mlx5e_sq_stats *sq_stats;
- int i, j;
+ struct rtnl_link_stats64 stats64 = {};
memset(s, 0, sizeof(*s));
- for (i = 0; i < priv->channels.num; i++) {
- struct mlx5e_channel *c = priv->channels.c[i];
-
- rq_stats = c->rq.stats;
-
- s->rx_packets += rq_stats->packets;
- s->rx_bytes += rq_stats->bytes;
+ mlx5e_fold_sw_stats64(priv, &stats64);
- for (j = 0; j < priv->channels.params.num_tc; j++) {
- sq_stats = c->sq[j].stats;
-
- s->tx_packets += sq_stats->packets;
- s->tx_bytes += sq_stats->bytes;
- s->tx_queue_dropped += sq_stats->dropped;
- }
- }
+ s->rx_packets = stats64.rx_packets;
+ s->rx_bytes = stats64.rx_bytes;
+ s->tx_packets = stats64.tx_packets;
+ s->tx_bytes = stats64.tx_bytes;
+ s->tx_queue_dropped = stats64.tx_dropped;
}
static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
@@ -195,8 +184,7 @@ static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
return;
mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_rep_update_sw_counters(priv);
+ mlx5e_rep_update_sw_counters(priv);
mlx5e_rep_update_hw_counters(priv);
mutex_unlock(&priv->state_lock);
@@ -1237,17 +1225,8 @@ mlx5e_get_sw_stats64(const struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_sw_stats *sstats = &priv->stats.sw;
-
- mlx5e_rep_update_sw_counters(priv);
-
- stats->rx_packets = sstats->rx_packets;
- stats->rx_bytes = sstats->rx_bytes;
- stats->tx_packets = sstats->tx_packets;
- stats->tx_bytes = sstats->tx_bytes;
-
- stats->tx_dropped = sstats->tx_queue_dropped;
+ mlx5e_fold_sw_stats64(priv, stats);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index f86e4804e83e..be396e5e4e39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -52,40 +52,45 @@ static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
return config->rx_filter == HWTSTAMP_FILTER_ALL;
}
-static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
- void *data)
+static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
+ u32 cqcc, void *data)
{
- u32 ci = mlx5_cqwq_ctr2ix(&cq->wq, cqcc);
+ u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
- memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));
+ memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
}
static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
- struct mlx5e_cq *cq, u32 cqcc)
+ struct mlx5_cqwq *wq,
+ u32 cqcc)
{
- mlx5e_read_cqe_slot(cq, cqcc, &cq->title);
- cq->decmprs_left = be32_to_cpu(cq->title.byte_cnt);
- cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter);
+ struct mlx5e_cq_decomp *cqd = &rq->cqd;
+ struct mlx5_cqe64 *title = &cqd->title;
+
+ mlx5e_read_cqe_slot(wq, cqcc, title);
+ cqd->left = be32_to_cpu(title->byte_cnt);
+ cqd->wqe_counter = be16_to_cpu(title->wqe_counter);
rq->stats->cqe_compress_blks++;
}
-static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
+static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq,
+ struct mlx5e_cq_decomp *cqd,
+ u32 cqcc)
{
- mlx5e_read_cqe_slot(cq, cqcc, cq->mini_arr);
- cq->mini_arr_idx = 0;
+ mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr);
+ cqd->mini_arr_idx = 0;
}
-static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
+static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n)
{
- struct mlx5_cqwq *wq = &cq->wq;
-
+ u32 cqcc = wq->cc;
u8 op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
u32 wq_sz = mlx5_cqwq_get_size(wq);
u32 ci_top = min_t(u32, wq_sz, ci + n);
for (; ci < ci_top; ci++, n--) {
- struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
cqe->op_own = op_own;
}
@@ -93,7 +98,7 @@ static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
if (unlikely(ci == wq_sz)) {
op_own = !op_own;
for (ci = 0; ci < n; ci++) {
- struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
cqe->op_own = op_own;
}
@@ -101,68 +106,79 @@ static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
}
static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
- struct mlx5e_cq *cq, u32 cqcc)
+ struct mlx5_cqwq *wq,
+ u32 cqcc)
{
- cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
- cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum;
- cq->title.op_own &= 0xf0;
- cq->title.op_own |= 0x01 & (cqcc >> cq->wq.fbc.log_sz);
- cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter);
+ struct mlx5e_cq_decomp *cqd = &rq->cqd;
+ struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx];
+ struct mlx5_cqe64 *title = &cqd->title;
+
+ title->byte_cnt = mini_cqe->byte_cnt;
+ title->check_sum = mini_cqe->checksum;
+ title->op_own &= 0xf0;
+ title->op_own |= 0x01 & (cqcc >> wq->fbc.log_sz);
+ title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
- cq->decmprs_wqe_counter +=
- mpwrq_get_cqe_consumed_strides(&cq->title);
+ cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
else
- cq->decmprs_wqe_counter =
- mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cq->decmprs_wqe_counter + 1);
+ cqd->wqe_counter =
+ mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
}
static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
- struct mlx5e_cq *cq, u32 cqcc)
+ struct mlx5_cqwq *wq,
+ u32 cqcc)
{
- mlx5e_decompress_cqe(rq, cq, cqcc);
- cq->title.rss_hash_type = 0;
- cq->title.rss_hash_result = 0;
+ struct mlx5e_cq_decomp *cqd = &rq->cqd;
+
+ mlx5e_decompress_cqe(rq, wq, cqcc);
+ cqd->title.rss_hash_type = 0;
+ cqd->title.rss_hash_result = 0;
}
static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
- struct mlx5e_cq *cq,
+ struct mlx5_cqwq *wq,
int update_owner_only,
int budget_rem)
{
- u32 cqcc = cq->wq.cc + update_owner_only;
+ struct mlx5e_cq_decomp *cqd = &rq->cqd;
+ u32 cqcc = wq->cc + update_owner_only;
u32 cqe_count;
u32 i;
- cqe_count = min_t(u32, cq->decmprs_left, budget_rem);
+ cqe_count = min_t(u32, cqd->left, budget_rem);
for (i = update_owner_only; i < cqe_count;
- i++, cq->mini_arr_idx++, cqcc++) {
- if (cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
- mlx5e_read_mini_arr_slot(cq, cqcc);
+ i++, cqd->mini_arr_idx++, cqcc++) {
+ if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
+ mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
- mlx5e_decompress_cqe_no_hash(rq, cq, cqcc);
- rq->handle_rx_cqe(rq, &cq->title);
+ mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
+ rq->handle_rx_cqe(rq, &cqd->title);
}
- mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc);
- cq->wq.cc = cqcc;
- cq->decmprs_left -= cqe_count;
+ mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
+ wq->cc = cqcc;
+ cqd->left -= cqe_count;
rq->stats->cqe_compress_pkts += cqe_count;
return cqe_count;
}
static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
- struct mlx5e_cq *cq,
+ struct mlx5_cqwq *wq,
int budget_rem)
{
- mlx5e_read_title_slot(rq, cq, cq->wq.cc);
- mlx5e_read_mini_arr_slot(cq, cq->wq.cc + 1);
- mlx5e_decompress_cqe(rq, cq, cq->wq.cc);
- rq->handle_rx_cqe(rq, &cq->title);
- cq->mini_arr_idx++;
+ struct mlx5e_cq_decomp *cqd = &rq->cqd;
+ u32 cc = wq->cc;
- return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
+ mlx5e_read_title_slot(rq, wq, cc);
+ mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
+ mlx5e_decompress_cqe(rq, wq, cc);
+ rq->handle_rx_cqe(rq, &cqd->title);
+ cqd->mini_arr_idx++;
+
+ return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
}
static inline bool mlx5e_page_is_reserved(struct page *page)
@@ -369,7 +385,7 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
static inline void
mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
struct mlx5e_dma_info *dma_info,
- int offset_from, int offset_to, u32 headlen)
+ int offset_from, u32 headlen)
{
const void *from = page_address(dma_info->page) + offset_from;
/* Aligning len to sizeof(long) optimizes memcpy performance */
@@ -377,24 +393,7 @@ mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len,
DMA_FROM_DEVICE);
- skb_copy_to_linear_data_offset(skb, offset_to, from, len);
-}
-
-static inline void
-mlx5e_copy_skb_header_mpwqe(struct device *pdev,
- struct sk_buff *skb,
- struct mlx5e_dma_info *dma_info,
- u32 offset, u32 headlen)
-{
- u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
-
- mlx5e_copy_skb_header(pdev, skb, dma_info, offset, 0, headlen_pg);
-
- if (unlikely(offset + headlen > PAGE_SIZE)) {
- dma_info++;
- mlx5e_copy_skb_header(pdev, skb, dma_info, 0, headlen_pg,
- headlen - headlen_pg);
- }
+ skb_copy_to_linear_data(skb, from, len);
}
static void
@@ -973,8 +972,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
}
/* copy header */
- mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset,
- 0, headlen);
+ mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
@@ -1096,8 +1094,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
di++;
}
/* copy header */
- mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, head_di,
- head_offset, headlen);
+ mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
@@ -1203,16 +1200,17 @@ mpwrq_cqe_out:
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
+ struct mlx5_cqwq *cqwq = &cq->wq;
struct mlx5_cqe64 *cqe;
int work_done = 0;
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return 0;
- if (cq->decmprs_left)
- work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
+ if (rq->cqd.left)
+ work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
- cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ cqe = mlx5_cqwq_get_cqe(cqwq);
if (!cqe) {
if (unlikely(work_done))
goto out;
@@ -1222,21 +1220,21 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
do {
if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
work_done +=
- mlx5e_decompress_cqes_start(rq, cq,
+ mlx5e_decompress_cqes_start(rq, cqwq,
budget - work_done);
continue;
}
- mlx5_cqwq_pop(&cq->wq);
+ mlx5_cqwq_pop(cqwq);
rq->handle_rx_cqe(rq, cqe);
- } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+ } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
out:
if (rq->xdp_prog)
mlx5e_xdp_rx_poll_complete(rq);
- mlx5_cqwq_update_db_record(&cq->wq);
+ mlx5_cqwq_update_db_record(cqwq);
/* ensure cq space is freed before enabling more cqes */
wmb();
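With CQE compression, the NIC writes one full "title" CQE followed by arrays of eight mini CQEs carrying only byte counts and checksums; decompression replays the title once per mini entry with those fields patched in, plus wqe_counter bookkeeping that differs between striding and cyclic RQs. This refactor moves the decompression scratch state out of mlx5e_cq into a per-RQ mlx5e_cq_decomp and passes the mlx5_cqwq explicitly, leaving mlx5e_cq with control fields only. A conceptual sketch of the replay loop (illustrative types, not the driver source):

	#include <stdint.h>

	struct cqe      { uint32_t byte_cnt; uint32_t check_sum; };
	struct mini_cqe { uint32_t byte_cnt; uint32_t checksum; };

	static void replay(struct cqe *title, const struct mini_cqe *arr,
			   int n, void (*handle)(const struct cqe *))
	{
		for (int i = 0; i < n; i++) {
			/* patch the shared title with per-packet fields */
			title->byte_cnt  = arr[i].byte_cnt;
			title->check_sum = arr[i].checksum;
			/* ownership and wqe_counter fixups elided */
			handle(title);	/* same path as an uncompressed CQE */
		}
	}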
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index d3fe48ff9da9..1a78e05cbba8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -127,9 +127,9 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
-void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
+static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
{
- struct mlx5e_sw_stats temp, *s = &temp;
+ struct mlx5e_sw_stats *s = &priv->stats.sw;
int i;
memset(s, 0, sizeof(*s));
@@ -212,8 +212,6 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_cqes += sq_stats->cqes;
}
}
-
- memcpy(&priv->stats.sw, s, sizeof(*s));
}
static const struct counter_desc q_stats_desc[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index fe91ec06e3c7..4640d4f986f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -277,7 +277,6 @@ struct mlx5e_stats_grp {
extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
extern const int mlx5e_num_stats_grps;
-void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv);
void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv);
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index cae6c6d48984..74159d39dd66 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2767,14 +2767,13 @@ err_free:
return err;
}
-static int
+static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f,
u16 flow_flags,
struct net_device *filter_dev,
struct mlx5_eswitch_rep *in_rep,
- struct mlx5_core_dev *in_mdev,
- struct mlx5e_tc_flow **__flow)
+ struct mlx5_core_dev *in_mdev)
{
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -2814,15 +2813,13 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
- *__flow = flow;
-
- return 0;
+ return flow;
err_free:
kfree(flow);
kvfree(parse_attr);
out:
- return err;
+ return ERR_PTR(err);
}
static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f,
@@ -2855,11 +2852,13 @@ static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f,
in_mdev = priv->mdev;
parse_attr = flow->esw_attr->parse_attr;
- err = __mlx5e_add_fdb_flow(peer_priv, f, flow->flags,
- parse_attr->filter_dev,
- flow->esw_attr->in_rep, in_mdev, &peer_flow);
- if (err)
+ peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow->flags,
+ parse_attr->filter_dev,
+ flow->esw_attr->in_rep, in_mdev);
+ if (IS_ERR(peer_flow)) {
+ err = PTR_ERR(peer_flow);
goto out;
+ }
flow->peer_flow = peer_flow;
flow->flags |= MLX5E_TC_FLOW_DUP;
@@ -2885,10 +2884,10 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow;
int err;
- err = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
- in_mdev, &flow);
- if (err)
- goto out;
+ flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
+ in_mdev);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
if (is_peer_flow_needed(flow)) {
err = mlx5e_tc_add_fdb_peer_flow(f, flow);
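__mlx5e_add_fdb_flow() switches here from the int-plus-out-parameter convention to returning the flow pointer directly, with errors encoded via ERR_PTR(). That drops a level of indirection at every call site and makes it impossible to return success while leaving the out-parameter unset. A minimal kernel-context sketch of the idiom (struct foo and both helpers are illustrative; ERR_PTR/IS_ERR/PTR_ERR come from <linux/err.h>):

	#include <linux/err.h>
	#include <linux/slab.h>

	struct foo { int val; };

	static struct foo *foo_create(gfp_t flags)
	{
		struct foo *f = kzalloc(sizeof(*f), flags);

		if (!f)
			return ERR_PTR(-ENOMEM);	/* errno rides in the pointer */
		return f;
	}

	static int foo_use(void)
	{
		struct foo *f = foo_create(GFP_KERNEL);

		if (IS_ERR(f))
			return PTR_ERR(f);		/* unwrap negative errno */
		kfree(f);
		return 0;
	}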
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 79f122b45def..b2df7c3749fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -397,6 +397,7 @@ static void del_hw_flow_table(struct fs_node *node)
fs_get_obj(ft, node);
dev = get_dev(&ft->node);
root = find_root(&ft->node);
+ trace_mlx5_fs_del_ft(ft);
if (node->active) {
err = root->cmds->destroy_flow_table(dev, ft);
@@ -1019,6 +1020,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
fs_prio->num_ft++;
up_write_ref_node(&fs_prio->node);
mutex_unlock(&root->chain_lock);
+ trace_mlx5_fs_add_ft(ft);
return ft;
destroy_ft:
root->cmds->destroy_flow_table(root->dev, ft);
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
index 7a712b6b09ec..14c0c62f8e73 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */
#ifndef _MLXFW_H
#define _MLXFW_H
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
index 2cf89126fb23..240c027e5f07 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxfw/mlxfw.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */
#define pr_fmt(fmt) "mlxfw: " fmt
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
index 993cb5ba934e..544344ac4894 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */
#define pr_fmt(fmt) "mlxfw_mfa2: " fmt
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h
index 20472aa139cd..5bba6ad79d34 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */
#ifndef _MLXFW_MFA2_H
#define _MLXFW_MFA2_H
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h
index f667942b1ea3..874c0a2474ae 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */
#ifndef _MLXFW_MFA2_FILE_H
#define _MLXFW_MFA2_FILE_H
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
index dd66737c033d..b001e5258091 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
@@ -1,36 +1,6 @@
-/*
- * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */
+
#ifndef _MLXFW_MFA2_FORMAT_H
#define _MLXFW_MFA2_FORMAT_H
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
index cc013e77b326..33c971190bba 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */
#ifndef _MLXFW_MFA2_TLV_H
#define _MLXFW_MFA2_TLV_H
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
index 0094b92a233b..017d68f1e123 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */
#define pr_fmt(fmt) "MFA2: " fmt
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h
index 2c667894f3a2..633284eeded7 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h
@@ -1,36 +1,6 @@
-/*
- * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */
+
#ifndef _MLXFW_MFA2_TLV_MULTI_H
#define _MLXFW_MFA2_TLV_MULTI_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 9b48dffc9f63..5f8066ab7d40 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -5666,6 +5666,8 @@ enum mlxsw_reg_ritr_loopback_protocol {
MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV4,
/* IPinIP IPv6 underlay Unicast */
MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV6,
+ /* IPinIP generic - used for Spectrum-2 underlay RIF */
+ MLXSW_REG_RITR_LOOPBACK_GENERIC,
};
/* reg_ritr_loopback_protocol
@@ -5706,6 +5708,13 @@ MLXSW_ITEM32(reg, ritr, loopback_ipip_options, 0x10, 20, 4);
*/
MLXSW_ITEM32(reg, ritr, loopback_ipip_uvr, 0x10, 0, 16);
+/* reg_ritr_loopback_ipip_underlay_rif
+ * Underlay ingress router interface.
+ * Reserved for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, loopback_ipip_underlay_rif, 0x14, 0, 16);
+
/* reg_ritr_loopback_ipip_usip*
* Encapsulation Underlay source IP.
* Access: RW
@@ -5821,11 +5830,12 @@ static inline void
mlxsw_reg_ritr_loopback_ipip_common_pack(char *payload,
enum mlxsw_reg_ritr_loopback_ipip_type ipip_type,
enum mlxsw_reg_ritr_loopback_ipip_options options,
- u16 uvr_id, u32 gre_key)
+ u16 uvr_id, u16 underlay_rif, u32 gre_key)
{
mlxsw_reg_ritr_loopback_ipip_type_set(payload, ipip_type);
mlxsw_reg_ritr_loopback_ipip_options_set(payload, options);
mlxsw_reg_ritr_loopback_ipip_uvr_set(payload, uvr_id);
+ mlxsw_reg_ritr_loopback_ipip_underlay_rif_set(payload, underlay_rif);
mlxsw_reg_ritr_loopback_ipip_gre_key_set(payload, gre_key);
}
@@ -5833,12 +5843,12 @@ static inline void
mlxsw_reg_ritr_loopback_ipip4_pack(char *payload,
enum mlxsw_reg_ritr_loopback_ipip_type ipip_type,
enum mlxsw_reg_ritr_loopback_ipip_options options,
- u16 uvr_id, u32 usip, u32 gre_key)
+ u16 uvr_id, u16 underlay_rif, u32 usip, u32 gre_key)
{
mlxsw_reg_ritr_loopback_protocol_set(payload,
MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV4);
mlxsw_reg_ritr_loopback_ipip_common_pack(payload, ipip_type, options,
- uvr_id, gre_key);
+ uvr_id, underlay_rif, gre_key);
mlxsw_reg_ritr_loopback_ipip_usip4_set(payload, usip);
}
@@ -7200,6 +7210,13 @@ MLXSW_ITEM32(reg, rtdp, type, 0x00, 28, 4);
*/
MLXSW_ITEM32(reg, rtdp, tunnel_index, 0x00, 0, 24);
+/* reg_rtdp_egress_router_interface
+ * Underlay egress router interface.
+ * Valid range is from 0 to cap_max_router_interfaces - 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rtdp, egress_router_interface, 0x40, 0, 16);
+
/* IPinIP */
/* reg_rtdp_ipip_irif
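
The new underlay-RIF fields above are declared with MLXSW_ITEM32(), which generates typed get/set accessors for a bit field at a fixed byte offset in the register payload. Below is a minimal user-space sketch of that packing; the demo_* names are invented, and the real macro additionally converts to the device's big-endian byte order, which this sketch omits.

#include <stdint.h>
#include <string.h>

/* Pack a <width>-bit value at bit <shift> of the 32-bit word found at
 * byte offset <byte_off> of the register payload. Sketch only: no
 * endianness conversion, no bounds checking.
 */
static void demo_item32_set(uint8_t *payload, size_t byte_off,
                            unsigned int shift, unsigned int width,
                            uint32_t val)
{
        uint32_t word;
        uint32_t mask = (width == 32 ? 0xFFFFFFFFu
                                     : ((1u << width) - 1)) << shift;

        memcpy(&word, payload + byte_off, sizeof(word));
        word = (word & ~mask) | ((val << shift) & mask);
        memcpy(payload + byte_off, &word, sizeof(word));
}

/* e.g. the new 16-bit underlay RIF field at offset 0x14, LSB 0:
 * demo_item32_set(payload, 0x14, 0, 16, ul_rif_index);
 */
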
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 32519c93df17..a88169738b4a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4094,6 +4094,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
+ mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}
@@ -4110,6 +4111,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
+ mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a1c32a81b011..4fe0996c7cdd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -75,6 +75,11 @@ enum mlxsw_sp_rif_type {
MLXSW_SP_RIF_TYPE_MAX,
};
+struct mlxsw_sp_rif_ops;
+
+extern const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[];
+extern const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[];
+
enum mlxsw_sp_fid_type {
MLXSW_SP_FID_TYPE_8021Q,
MLXSW_SP_FID_TYPE_8021D,
@@ -161,6 +166,7 @@ struct mlxsw_sp {
const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops;
const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops;
const struct mlxsw_sp_nve_ops **nve_ops_arr;
+ const struct mlxsw_sp_rif_ops **rif_ops_arr;
};
static inline struct mlxsw_sp_upper *
@@ -501,6 +507,9 @@ void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
const union mlxsw_sp_l3addr *ul_sip);
int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
u16 *vr_id);
+int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+ u16 *ul_rif_index);
+void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index);
/* spectrum_kvdl.c */
enum mlxsw_sp_kvdl_entry_type {
@@ -712,8 +721,7 @@ struct mlxsw_sp_acl_tcam_ops {
void *region_priv, void *chunk_priv,
void *entry_priv);
int (*entry_action_replace)(struct mlxsw_sp *mlxsw_sp,
- void *region_priv, void *chunk_priv,
- void *entry_priv,
+ void *region_priv, void *entry_priv,
struct mlxsw_sp_acl_rule_info *rulei);
int (*entry_activity_get)(struct mlxsw_sp *mlxsw_sp,
void *region_priv, void *entry_priv,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
index fe270c1a26a6..6e444525713f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
@@ -194,8 +194,7 @@ static void mlxsw_sp1_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp1_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
- void *region_priv, void *chunk_priv,
- void *entry_priv,
+ void *region_priv, void *entry_priv,
struct mlxsw_sp_acl_rule_info *rulei)
{
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
index 234ab51916db..d380b3403960 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
@@ -212,18 +212,15 @@ static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp2_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
- void *region_priv, void *chunk_priv,
- void *entry_priv,
+ void *region_priv, void *entry_priv,
struct mlxsw_sp_acl_rule_info *rulei)
{
struct mlxsw_sp2_acl_tcam_region *region = region_priv;
- struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
entry->act_block = rulei->act_block;
return mlxsw_sp_acl_atcam_entry_action_replace(mlxsw_sp,
&region->aregion,
- &chunk->achunk,
&entry->aentry, rulei);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 695d33358988..a69e3462b65e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -742,8 +742,7 @@ int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
rulei = mlxsw_sp_acl_rule_rulei(rule);
rulei->act_block = afa_block;
- return ops->rule_action_replace(mlxsw_sp, ruleset->priv, rule->priv,
- rule->rulei);
+ return ops->rule_action_replace(mlxsw_sp, rule->priv, rule->rulei);
}
struct mlxsw_sp_acl_rule *
@@ -806,7 +805,7 @@ static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
msecs_to_jiffies(interval));
}
-static void mlxsw_sp_acl_rul_activity_update_work(struct work_struct *work)
+static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
rule_activity_update.dw.work);
@@ -885,7 +884,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
/* Create the delayed work for the rule activity_update */
INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
- mlxsw_sp_acl_rul_activity_update_work);
+ mlxsw_sp_acl_rule_activity_update_work);
acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
index 80fb268d51a5..40dc76a5c412 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
@@ -603,7 +603,6 @@ void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
int
mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_atcam_chunk *achunk,
struct mlxsw_sp_acl_atcam_entry *aentry,
struct mlxsw_sp_acl_rule_info *rulei)
{
@@ -612,7 +611,6 @@ mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
if (mlxsw_sp_acl_atcam_is_centry(aentry))
err = mlxsw_sp_acl_ctcam_entry_action_replace(mlxsw_sp,
&aregion->cregion,
- &achunk->cchunk,
&aentry->centry,
rulei);
else
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
index 505b87846acc..f5c381dcb015 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -234,8 +234,8 @@ mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks)
* is 2^ACL_MAX_BF_LOG
*/
bf_bank_size = 1 << MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_BF_LOG);
- bf = kzalloc(sizeof(*bf) + bf_bank_size * num_erp_banks *
- sizeof(*bf->refcnt), GFP_KERNEL);
+ bf = kzalloc(struct_size(bf, refcnt, bf_bank_size * num_erp_banks),
+ GFP_KERNEL);
if (!bf)
return ERR_PTR(-ENOMEM);
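
The kzalloc() conversion above replaces an open-coded size computation with struct_size() from <linux/overflow.h>, which sizes a struct ending in a flexible array member and saturates to SIZE_MAX on overflow, so an overflowing request fails cleanly instead of allocating an undersized buffer. A minimal user-space sketch of the same computation follows; the demo_* names are made up.

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

struct demo_bf {
        unsigned int bank_size;
        unsigned int refcnt[];          /* flexible array member */
};

/* Return base + elem * count, saturating to SIZE_MAX on overflow so a
 * subsequent malloc() fails rather than succeeding with too few bytes.
 */
static size_t demo_struct_size(size_t base, size_t elem, size_t count)
{
        if (count && elem > (SIZE_MAX - base) / count)
                return SIZE_MAX;        /* saturate instead of wrapping */
        return base + elem * count;
}

/* Usage, mirroring the hunk above:
 * struct demo_bf *bf = malloc(demo_struct_size(sizeof(*bf),
 *                             sizeof(*bf->refcnt), nbanks * bank_size));
 */
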
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
index ac222833a5cf..05680a7e6c56 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
@@ -223,7 +223,6 @@ void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_acl_ctcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ctcam_region *cregion,
- struct mlxsw_sp_acl_ctcam_chunk *cchunk,
struct mlxsw_sp_acl_ctcam_entry *centry,
struct mlxsw_sp_acl_rule_info *rulei)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index fe230acf92a9..11456e1f236f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -781,7 +781,6 @@ static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_group *group,
struct mlxsw_sp_acl_tcam_entry *entry,
struct mlxsw_sp_acl_rule_info *rulei)
{
@@ -789,7 +788,7 @@ mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
struct mlxsw_sp_acl_tcam_region *region = chunk->region;
- return ops->entry_action_replace(mlxsw_sp, region->priv, chunk->priv,
+ return ops->entry_action_replace(mlxsw_sp, region->priv,
entry->priv, rulei);
}
@@ -863,15 +862,6 @@ struct mlxsw_sp_acl_tcam_flower_rule {
struct mlxsw_sp_acl_tcam_entry entry;
};
-struct mlxsw_sp_acl_tcam_mr_ruleset {
- struct mlxsw_sp_acl_tcam_chunk *chunk;
- struct mlxsw_sp_acl_tcam_group group;
-};
-
-struct mlxsw_sp_acl_tcam_mr_rule {
- struct mlxsw_sp_acl_tcam_entry entry;
-};
-
static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
@@ -955,7 +945,6 @@ mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
static int
mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
- void *ruleset_priv,
void *rule_priv,
struct mlxsw_sp_acl_rule_info *rulei)
{
@@ -986,6 +975,15 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
.rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
};
+struct mlxsw_sp_acl_tcam_mr_ruleset {
+ struct mlxsw_sp_acl_tcam_chunk *chunk;
+ struct mlxsw_sp_acl_tcam_group group;
+};
+
+struct mlxsw_sp_acl_tcam_mr_rule {
+ struct mlxsw_sp_acl_tcam_entry entry;
+};
+
static int
mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
@@ -1084,14 +1082,13 @@ mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
static int
mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
- void *ruleset_priv, void *rule_priv,
+ void *rule_priv,
struct mlxsw_sp_acl_rule_info *rulei)
{
- struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
- return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp, &ruleset->group,
- &rule->entry, rulei);
+ return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp, &rule->entry,
+ rulei);
}
static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 0f1a9dee63de..10512b7c6d50 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -48,8 +48,7 @@ struct mlxsw_sp_acl_profile_ops {
void *ruleset_priv, void *rule_priv,
struct mlxsw_sp_acl_rule_info *rulei);
void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
- int (*rule_action_replace)(struct mlxsw_sp *mlxsw_sp,
- void *ruleset_priv, void *rule_priv,
+ int (*rule_action_replace)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
struct mlxsw_sp_acl_rule_info *rulei);
int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
bool *activity);
@@ -126,7 +125,6 @@ void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ctcam_entry *centry);
int mlxsw_sp_acl_ctcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ctcam_region *cregion,
- struct mlxsw_sp_acl_ctcam_chunk *cchunk,
struct mlxsw_sp_acl_ctcam_entry *centry,
struct mlxsw_sp_acl_rule_info *rulei);
static inline unsigned int
@@ -224,7 +222,6 @@ void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_entry *aentry);
int mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_atcam_chunk *achunk,
struct mlxsw_sp_acl_atcam_entry *aentry,
struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
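
The signature changes running through the ACL files above are one refactor: no rule_action_replace()/entry_action_replace() implementation ever used its ruleset/chunk argument, so the parameter is dropped from the ops tables and from every caller and callee in one sweep. A tiny sketch of the resulting shape, with illustrative names only:

/* Before: int (*rule_action_replace)(void *ruleset_priv, void *rule_priv, ...);
 * After:  int (*rule_action_replace)(void *rule_priv, ...);
 */
struct demo_profile_ops {
        int (*rule_action_replace)(void *rule_priv);
};

static int demo_rule_action_replace(void *rule_priv)
{
        (void)rule_priv;        /* the unused ruleset/chunk priv is gone */
        return 0;
}

static const struct demo_profile_ops demo_ops = {
        .rule_action_replace = demo_rule_action_replace,
};
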
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 41e607a14846..49933818c6f5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -220,7 +220,7 @@ start_again:
for (; i < rif_count; i++) {
struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
- if (!rif)
+ if (!rif || !mlxsw_sp_rif_dev(rif))
continue;
err = mlxsw_sp_erif_entry_get(mlxsw_sp, &entry, rif,
counters_enabled);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 00db26c96bf5..6400cd644b7a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -145,6 +145,7 @@ mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
+ u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb);
char rtdp_pl[MLXSW_REG_RTDP_LEN];
struct ip_tunnel_parm parms;
unsigned int type_check;
@@ -157,6 +158,7 @@ mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp,
ikey = mlxsw_sp_ipip_parms4_ikey(parms);
mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index);
+ mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id);
type_check = has_ikey ?
MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE_KEY :
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index fb1c48c698f2..1df164a4b06d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -267,8 +267,8 @@ mlxsw_sp_nve_mc_record_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nve_mc_record *mc_record;
int err;
- mc_record = kzalloc(sizeof(*mc_record) + num_max_entries *
- sizeof(struct mlxsw_sp_nve_mc_entry), GFP_KERNEL);
+ mc_record = kzalloc(struct_size(mc_record, entries, num_max_entries),
+ GFP_KERNEL);
if (!mc_record)
return ERR_PTR(-ENOMEM);
@@ -841,11 +841,9 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
nve->config = config;
- err = ops->fdb_replay(params->dev, params->vni);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Failed to offload the FDB");
+ err = ops->fdb_replay(params->dev, params->vni, extack);
+ if (err)
goto err_fdb_replay;
- }
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
index 02937ea95bc3..0035640156a1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
@@ -28,6 +28,7 @@ struct mlxsw_sp_nve {
unsigned int num_nve_tunnels; /* Protected by RTNL */
unsigned int num_max_mc_entries[MLXSW_SP_L3_PROTO_MAX];
u32 tunnel_index;
+ u16 ul_rif_index; /* Reserved for Spectrum */
};
struct mlxsw_sp_nve_ops {
@@ -41,7 +42,8 @@ struct mlxsw_sp_nve_ops {
int (*init)(struct mlxsw_sp_nve *nve,
const struct mlxsw_sp_nve_config *config);
void (*fini)(struct mlxsw_sp_nve *nve);
- int (*fdb_replay)(const struct net_device *nve_dev, __be32 vni);
+ int (*fdb_replay)(const struct net_device *nve_dev, __be32 vni,
+ struct netlink_ext_ack *extack);
void (*fdb_clear_offload)(const struct net_device *nve_dev, __be32 vni);
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
index 74e564c4ac19..93ccd9fc2266 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -7,6 +7,7 @@
#include <net/vxlan.h>
#include "reg.h"
+#include "spectrum.h"
#include "spectrum_nve.h"
/* Eth (18B) | IPv6 (40B) | UDP (8B) | VxLAN (8B) | Eth (14B) | IPv6 (40B)
@@ -20,9 +21,9 @@
#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \
VXLAN_F_LEARN)
-static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
- const struct net_device *dev,
- struct netlink_ext_ack *extack)
+static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
+ const struct net_device *dev,
+ struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_config *cfg = &vxlan->cfg;
@@ -112,13 +113,30 @@ static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
}
+static void
+mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl,
+ const struct mlxsw_sp_nve_config *config)
+{
+ u8 udp_sport;
+
+ mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true,
+ config->ttl);
+ /* VxLAN driver's default UDP source port range is 32768 (0x8000)
+ * to 60999 (0xee47). Set the upper 8 bits of the UDP source port
+ * to a random number between 0x80 and 0xee
+ */
+ get_random_bytes(&udp_sport, sizeof(udp_sport));
+ udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80;
+ mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport);
+ mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4));
+}
+
static int
mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_nve_config *config)
{
char tngcr_pl[MLXSW_REG_TNGCR_LEN];
u16 ul_vr_id;
- u8 udp_sport;
int err;
err = mlxsw_sp_router_tb_id_vr_id(mlxsw_sp, config->ul_tb_id,
@@ -126,18 +144,9 @@ mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true,
- config->ttl);
- /* VxLAN driver's default UDP source port range is 32768 (0x8000)
- * to 60999 (0xee47). Set the upper 8 bits of the UDP source port
- * to a random number between 0x80 and 0xee
- */
- get_random_bytes(&udp_sport, sizeof(udp_sport));
- udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80;
- mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport);
+ mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config);
mlxsw_reg_tngcr_learn_enable_set(tngcr_pl, config->learning_en);
mlxsw_reg_tngcr_underlay_virtual_router_set(tngcr_pl, ul_vr_id);
- mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4));
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
}
@@ -212,11 +221,13 @@ static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
}
static int
-mlxsw_sp_nve_vxlan_fdb_replay(const struct net_device *nve_dev, __be32 vni)
+mlxsw_sp_nve_vxlan_fdb_replay(const struct net_device *nve_dev, __be32 vni,
+ struct netlink_ext_ack *extack)
{
if (WARN_ON(!netif_is_vxlan(nve_dev)))
return -EINVAL;
- return vxlan_fdb_replay(nve_dev, vni, &mlxsw_sp_switchdev_notifier);
+ return vxlan_fdb_replay(nve_dev, vni, &mlxsw_sp_switchdev_notifier,
+ extack);
}
static void
@@ -229,7 +240,7 @@ mlxsw_sp_nve_vxlan_clear_offload(const struct net_device *nve_dev, __be32 vni)
const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
.type = MLXSW_SP_NVE_TYPE_VXLAN,
- .can_offload = mlxsw_sp1_nve_vxlan_can_offload,
+ .can_offload = mlxsw_sp_nve_vxlan_can_offload,
.nve_config = mlxsw_sp_nve_vxlan_config,
.init = mlxsw_sp1_nve_vxlan_init,
.fini = mlxsw_sp1_nve_vxlan_fini,
@@ -237,26 +248,126 @@ const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
.fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
};
-static bool mlxsw_sp2_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
- const struct net_device *dev,
- struct netlink_ext_ack *extack)
+static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
+ bool learning_en)
{
- return false;
+ char tnpc_pl[MLXSW_REG_TNPC_LEN];
+
+ mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TNPC_TUNNEL_PORT_NVE,
+ learning_en);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnpc), tnpc_pl);
+}
+
+static int
+mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nve_config *config)
+{
+ char tngcr_pl[MLXSW_REG_TNGCR_LEN];
+ u16 ul_rif_index;
+ int err;
+
+ err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, config->ul_tb_id,
+ &ul_rif_index);
+ if (err)
+ return err;
+ mlxsw_sp->nve->ul_rif_index = ul_rif_index;
+
+ err = mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, config->learning_en);
+ if (err)
+ goto err_vxlan_learning_set;
+
+ mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config);
+ mlxsw_reg_tngcr_underlay_rif_set(tngcr_pl, ul_rif_index);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
+ if (err)
+ goto err_tngcr_write;
+
+ return 0;
+
+err_tngcr_write:
+ mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
+err_vxlan_learning_set:
+ mlxsw_sp_router_ul_rif_put(mlxsw_sp, ul_rif_index);
+ return err;
+}
+
+static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
+{
+ char tngcr_pl[MLXSW_REG_TNGCR_LEN];
+
+ mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
+ mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
+ mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->nve->ul_rif_index);
+}
+
+static int mlxsw_sp2_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp,
+ unsigned int tunnel_index,
+ u16 ul_rif_index)
+{
+ char rtdp_pl[MLXSW_REG_RTDP_LEN];
+
+ mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index);
+ mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_index);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
}
static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve,
const struct mlxsw_sp_nve_config *config)
{
- return -EOPNOTSUPP;
+ struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
+ int err;
+
+ err = mlxsw_sp_nve_parsing_set(mlxsw_sp,
+ MLXSW_SP_NVE_VXLAN_PARSING_DEPTH,
+ config->udp_dport);
+ if (err)
+ return err;
+
+ err = mlxsw_sp2_nve_vxlan_config_set(mlxsw_sp, config);
+ if (err)
+ goto err_config_set;
+
+ err = mlxsw_sp2_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index,
+ nve->ul_rif_index);
+ if (err)
+ goto err_rtdp_set;
+
+ err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id,
+ config->ul_proto,
+ &config->ul_sip,
+ nve->tunnel_index);
+ if (err)
+ goto err_promote_decap;
+
+ return 0;
+
+err_promote_decap:
+err_rtdp_set:
+ mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
+err_config_set:
+ mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
+ config->udp_dport);
+ return err;
}
static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
{
+ struct mlxsw_sp_nve_config *config = &nve->config;
+ struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
+
+ mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
+ config->ul_proto, &config->ul_sip);
+ mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
+ mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
+ config->udp_dport);
}
const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = {
.type = MLXSW_SP_NVE_TYPE_VXLAN,
- .can_offload = mlxsw_sp2_nve_vxlan_can_offload,
+ .can_offload = mlxsw_sp_nve_vxlan_can_offload,
.nve_config = mlxsw_sp_nve_vxlan_config,
.init = mlxsw_sp2_nve_vxlan_init,
.fini = mlxsw_sp2_nve_vxlan_fini,
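
The factored-out mlxsw_sp_nve_vxlan_config_prepare() keeps the UDP source-port trick intact for both ASIC generations: a random byte is folded into [0x80, 0xee] and programmed as the upper byte of the source port, so hardware-chosen ports track the VxLAN driver's default range of 32768 (0x8000) to 60999 (0xee47). A runnable user-space sketch of just that arithmetic:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        uint8_t prefix = (uint8_t)rand();   /* stands in for get_random_bytes() */

        /* 0xee - 0x80 + 1 == 111 possible upper-byte values */
        prefix = (prefix % (0xee - 0x80 + 1)) + 0x80;
        printf("sport prefix 0x%02x -> ports 0x%02x00..0x%02xff\n",
               prefix, prefix, prefix);
        return 0;
}
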
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 98e5ffd71b91..230e1f6e192b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -80,7 +80,7 @@ struct mlxsw_sp_router {
struct mlxsw_sp_rif {
struct list_head nexthop_list;
struct list_head neigh_list;
- struct net_device *dev;
+ struct net_device *dev; /* NULL for underlay RIF */
struct mlxsw_sp_fid *fid;
unsigned char addr[ETH_ALEN];
int mtu;
@@ -120,6 +120,7 @@ struct mlxsw_sp_rif_ipip_lb {
struct mlxsw_sp_rif common;
struct mlxsw_sp_rif_ipip_lb_config lb_config;
u16 ul_vr_id; /* Reserved for Spectrum-2. */
+ u16 ul_rif_id; /* Reserved for Spectrum. */
};
struct mlxsw_sp_rif_params_ipip_lb {
@@ -440,6 +441,8 @@ struct mlxsw_sp_vr {
struct mlxsw_sp_fib *fib4;
struct mlxsw_sp_fib *fib6;
struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
+ struct mlxsw_sp_rif *ul_rif;
+ refcount_t ul_rif_refcnt;
};
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
@@ -1437,8 +1440,8 @@ mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
}
static int
-mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
- struct mlxsw_sp_vr *ul_vr, bool enable)
+mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
+ u16 ul_rif_id, bool enable)
{
struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
struct mlxsw_sp_rif *rif = &lb_rif->common;
@@ -1453,7 +1456,7 @@ mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
rif->rif_index, rif->vr_id, rif->dev->mtu);
mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
- ul_vr->id, saddr4, lb_cf.okey);
+ ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
break;
case MLXSW_SP_L3_PROTO_IPV6:
@@ -1468,14 +1471,13 @@ static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_ipip_entry *ipip_entry;
struct mlxsw_sp_rif_ipip_lb *lb_rif;
- struct mlxsw_sp_vr *ul_vr;
int err = 0;
ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
if (ipip_entry) {
lb_rif = ipip_entry->ol_lb;
- ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
- err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
+ err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
+ lb_rif->ul_rif_id, true);
if (err)
goto out;
lb_rif->common.mtu = ol_dev->mtu;
@@ -6224,10 +6226,12 @@ static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
INIT_LIST_HEAD(&rif->nexthop_list);
INIT_LIST_HEAD(&rif->neigh_list);
- ether_addr_copy(rif->addr, l3_dev->dev_addr);
- rif->mtu = l3_dev->mtu;
+ if (l3_dev) {
+ ether_addr_copy(rif->addr, l3_dev->dev_addr);
+ rif->mtu = l3_dev->mtu;
+ rif->dev = l3_dev;
+ }
rif->vr_id = vr_id;
- rif->dev = l3_dev;
rif->rif_index = rif_index;
return rif;
@@ -6251,7 +6255,19 @@ u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
- return lb_rif->ul_vr_id;
+ u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
+ struct mlxsw_sp_vr *ul_vr;
+
+ ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
+ if (WARN_ON(IS_ERR(ul_vr)))
+ return 0;
+
+ return ul_vr->id;
+}
+
+u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
+{
+ return lb_rif->ul_rif_id;
}
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
@@ -6284,7 +6300,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
int i, err;
type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
- ops = mlxsw_sp->router->rif_ops_arr[type];
+ ops = mlxsw_sp->rif_ops_arr[type];
vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
if (IS_ERR(vr))
@@ -6303,6 +6319,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
goto err_rif_alloc;
}
dev_hold(rif->dev);
+ mlxsw_sp->router->rifs[rif_index] = rif;
rif->mlxsw_sp = mlxsw_sp;
rif->ops = ops;
@@ -6329,7 +6346,6 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
}
mlxsw_sp_rif_counters_alloc(rif);
- mlxsw_sp->router->rifs[rif_index] = rif;
return rif;
@@ -6341,6 +6357,7 @@ err_configure:
if (fid)
mlxsw_sp_fid_put(fid);
err_fid_get:
+ mlxsw_sp->router->rifs[rif_index] = NULL;
dev_put(rif->dev);
kfree(rif);
err_rif_alloc:
@@ -6361,7 +6378,6 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
vr = &mlxsw_sp->router->vrs[rif->vr_id];
- mlxsw_sp->router->rifs[rif->rif_index] = NULL;
mlxsw_sp_rif_counters_free(rif);
for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
@@ -6369,6 +6385,7 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
if (fid)
/* Loopback RIFs are not associated with a FID. */
mlxsw_sp_fid_put(fid);
+ mlxsw_sp->router->rifs[rif->rif_index] = NULL;
dev_put(rif->dev);
kfree(rif);
vr->rif_count--;
@@ -6750,7 +6767,7 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
rif = mlxsw_sp->router->rifs[i];
- if (rif && rif->dev != dev &&
+ if (rif && rif->dev && rif->dev != dev &&
!ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
mlxsw_sp->mac_mask)) {
NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix");
@@ -7294,7 +7311,8 @@ static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
info.addr = mac;
info.vid = vid;
- call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info);
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
+ NULL);
}
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
@@ -7381,7 +7399,8 @@ static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
info.addr = mac;
info.vid = 0;
- call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info);
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
+ NULL);
}
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
@@ -7422,7 +7441,7 @@ mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
}
static int
-mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
+mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
@@ -7434,11 +7453,12 @@ mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
if (IS_ERR(ul_vr))
return PTR_ERR(ul_vr);
- err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
+ err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
if (err)
goto err_loopback_op;
lb_rif->ul_vr_id = ul_vr->id;
+ lb_rif->ul_rif_id = 0;
++ul_vr->rif_count;
return 0;
@@ -7447,32 +7467,213 @@ err_loopback_op:
return err;
}
-static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
+static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_vr *ul_vr;
ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
- mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);
+ mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
--ul_vr->rif_count;
mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}
-static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
+static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
.type = MLXSW_SP_RIF_TYPE_IPIP_LB,
.rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
.setup = mlxsw_sp_rif_ipip_lb_setup,
- .configure = mlxsw_sp_rif_ipip_lb_configure,
- .deconfigure = mlxsw_sp_rif_ipip_lb_deconfigure,
+ .configure = mlxsw_sp1_rif_ipip_lb_configure,
+ .deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure,
};
-static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
+const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
- [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops,
+ [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
+};
+
+static int
+mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
+ ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
+ mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
+ MLXSW_REG_RITR_LOOPBACK_GENERIC);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif *ul_rif;
+ u16 rif_index;
+ int err;
+
+ err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
+ return ERR_PTR(err);
+ }
+
+ ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
+ if (!ul_rif)
+ return ERR_PTR(-ENOMEM);
+
+ mlxsw_sp->router->rifs[rif_index] = ul_rif;
+ ul_rif->mlxsw_sp = mlxsw_sp;
+ err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
+ if (err)
+ goto ul_rif_op_err;
+
+ return ul_rif;
+
+ul_rif_op_err:
+ mlxsw_sp->router->rifs[rif_index] = NULL;
+ kfree(ul_rif);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
+{
+ struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
+
+ mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
+ mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
+ kfree(ul_rif);
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_vr *vr;
+ int err;
+
+ vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
+ if (IS_ERR(vr))
+ return ERR_CAST(vr);
+
+ if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
+ return vr->ul_rif;
+
+ vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
+ if (IS_ERR(vr->ul_rif)) {
+ err = PTR_ERR(vr->ul_rif);
+ goto err_ul_rif_create;
+ }
+
+ vr->rif_count++;
+ refcount_set(&vr->ul_rif_refcnt, 1);
+
+ return vr->ul_rif;
+
+err_ul_rif_create:
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
+{
+ struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
+ struct mlxsw_sp_vr *vr;
+
+ vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
+
+ if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
+ return;
+
+ vr->rif_count--;
+ mlxsw_sp_ul_rif_destroy(ul_rif);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+}
+
+int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+ u16 *ul_rif_index)
+{
+ struct mlxsw_sp_rif *ul_rif;
+
+ ASSERT_RTNL();
+
+ ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
+ if (IS_ERR(ul_rif))
+ return PTR_ERR(ul_rif);
+ *ul_rif_index = ul_rif->rif_index;
+
+ return 0;
+}
+
+void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
+{
+ struct mlxsw_sp_rif *ul_rif;
+
+ ASSERT_RTNL();
+
+ ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
+ if (WARN_ON(!ul_rif))
+ return;
+
+ mlxsw_sp_ul_rif_put(ul_rif);
+}
+
+static int
+mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
+ u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_rif *ul_rif;
+ int err;
+
+ ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
+ if (IS_ERR(ul_rif))
+ return PTR_ERR(ul_rif);
+
+ err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
+ if (err)
+ goto err_loopback_op;
+
+ lb_rif->ul_vr_id = 0;
+ lb_rif->ul_rif_id = ul_rif->rif_index;
+
+ return 0;
+
+err_loopback_op:
+ mlxsw_sp_ul_rif_put(ul_rif);
+ return err;
+}
+
+static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_rif *ul_rif;
+
+ ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
+ mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
+ mlxsw_sp_ul_rif_put(ul_rif);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
+ .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
+ .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
+ .setup = mlxsw_sp_rif_ipip_lb_setup,
+ .configure = mlxsw_sp2_rif_ipip_lb_configure,
+ .deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure,
+};
+
+const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
+ [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
+ [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
+ [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
};
static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
@@ -7485,8 +7686,6 @@ static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp->router->rifs)
return -ENOMEM;
- mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
-
return 0;
}
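
The new mlxsw_sp_ul_rif_get()/mlxsw_sp_ul_rif_put() pair above implements a classic shared-object refcount: the first getter creates the underlay RIF and sets the count to 1, later getters only take a reference if the count is still non-zero, and the last putter destroys it. A minimal user-space sketch of that pattern using C11 atomics; demo_* names are invented.

#include <stdatomic.h>
#include <stdbool.h>

struct demo_obj {
        atomic_uint refcnt;             /* 0 means "not created yet" */
};

/* Analogous to refcount_inc_not_zero(): take a reference only if the
 * object is still alive; on false the caller must create it.
 */
static bool demo_inc_not_zero(struct demo_obj *obj)
{
        unsigned int old = atomic_load(&obj->refcnt);

        while (old != 0) {
                if (atomic_compare_exchange_weak(&obj->refcnt, &old, old + 1))
                        return true;    /* reference taken */
        }
        return false;
}

/* Analogous to refcount_dec_and_test(): true when this was the last
 * reference and the caller should tear the object down.
 */
static bool demo_dec_and_test(struct demo_obj *obj)
{
        return atomic_fetch_sub(&obj->refcnt, 1) == 1;
}
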
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 3dbafdeaab2b..cc1de91e8217 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -29,6 +29,7 @@ struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
+u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif);
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index c772109b638d..a4a9fe992193 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -2027,6 +2027,7 @@ mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
return 0;
if (mlxsw_sp_fid_vni_is_set(fid)) {
+ NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
err = -EINVAL;
goto err_vni_exists;
}
@@ -2213,10 +2214,13 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
int err;
fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
- if (!fid)
+ if (!fid) {
+ NL_SET_ERR_MSG_MOD(extack, "Did not find a corresponding FID");
return -EINVAL;
+ }
if (mlxsw_sp_fid_vni_is_set(fid)) {
+ NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
err = -EINVAL;
goto err_vni_exists;
}
@@ -2443,7 +2447,7 @@ static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
ether_addr_copy(info.eth_addr, mac);
info.vni = vni;
info.offloaded = adding;
- call_switchdev_notifiers(type, dev, &info.info);
+ call_switchdev_notifiers(type, dev, &info.info, NULL);
}
static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
@@ -2468,7 +2472,7 @@ mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
info.addr = mac;
info.vid = vid;
info.offloaded = offloaded;
- call_switchdev_notifiers(type, dev, &info.info);
+ call_switchdev_notifiers(type, dev, &info.info, NULL);
}
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
@@ -2819,7 +2823,7 @@ mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
return;
vxlan_fdb_info.offloaded = true;
call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
- &vxlan_fdb_info.info);
+ &vxlan_fdb_info.info, NULL);
mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
vxlan_fdb_info.eth_addr,
fdb_info->vid, dev, true);
@@ -2832,7 +2836,7 @@ mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
false);
vxlan_fdb_info.offloaded = false;
call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
- &vxlan_fdb_info.info);
+ &vxlan_fdb_info.info, NULL);
break;
}
}
@@ -2977,7 +2981,7 @@ mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
}
vxlan_fdb_info->offloaded = true;
call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
- &vxlan_fdb_info->info);
+ &vxlan_fdb_info->info, NULL);
mlxsw_sp_fid_put(fid);
return;
}
@@ -2998,7 +3002,7 @@ mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
goto err_fdb_tunnel_uc_op;
vxlan_fdb_info->offloaded = true;
call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
- &vxlan_fdb_info->info);
+ &vxlan_fdb_info->info, NULL);
mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
vxlan_fdb_info->eth_addr, vid, dev, true);
@@ -3099,23 +3103,34 @@ mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
struct vxlan_config *cfg = &vxlan->cfg;
+ struct netlink_ext_ack *extack;
+ extack = switchdev_notifier_info_to_extack(info);
vxlan_fdb_info = container_of(info,
struct switchdev_notifier_vxlan_fdb_info,
info);
- if (vxlan_fdb_info->remote_port != cfg->dst_port)
- return -EOPNOTSUPP;
- if (vxlan_fdb_info->remote_vni != cfg->vni)
+ if (vxlan_fdb_info->remote_port != cfg->dst_port) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default remote port is not supported");
return -EOPNOTSUPP;
- if (vxlan_fdb_info->vni != cfg->vni)
+ }
+ if (vxlan_fdb_info->remote_vni != cfg->vni ||
+ vxlan_fdb_info->vni != cfg->vni) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported");
return -EOPNOTSUPP;
- if (vxlan_fdb_info->remote_ifindex)
+ }
+ if (vxlan_fdb_info->remote_ifindex) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported");
return -EOPNOTSUPP;
- if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr))
+ }
+ if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported");
return -EOPNOTSUPP;
- if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip))
+ }
+ if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported");
return -EOPNOTSUPP;
+ }
switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;
@@ -3220,8 +3235,10 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
* the lookup function to return 'vxlan_dev'
*/
if (flag_untagged && flag_pvid &&
- mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid))
+ mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI");
return -EINVAL;
+ }
if (!netif_running(vxlan_dev))
return 0;
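
The pattern running through this file's hunks is extack plumbing: each -EOPNOTSUPP/-EINVAL now carries a human-readable reason that netlink returns to user space, instead of a bare errno. A minimal user-space analog of attaching a message to an error path; the demo_* names are invented.

#include <errno.h>
#include <stddef.h>

struct demo_extack {
        const char *msg;                /* analogous to extack->_msg */
};

static int demo_validate_vni(unsigned int vni, unsigned int cfg_vni,
                             struct demo_extack *extack)
{
        if (vni != cfg_vni) {
                /* the message travels back with the error code */
                extack->msg = "VxLAN: FDB: Non-default VNI is not supported";
                return -EOPNOTSUPP;
        }
        return 0;
}
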
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 07c1eb63415a..3a0b289d9771 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -14,61 +14,138 @@
#define EEPROM_INDICATOR_1 (0xA5)
#define EEPROM_INDICATOR_2 (0xAA)
#define EEPROM_MAC_OFFSET (0x01)
-#define MAX_EEPROM_SIZE 512
+#define MAX_EEPROM_SIZE (512)
+#define MAX_OTP_SIZE (1024)
#define OTP_INDICATOR_1 (0xF3)
#define OTP_INDICATOR_2 (0xF7)
-static int lan743x_otp_write(struct lan743x_adapter *adapter, u32 offset,
- u32 length, u8 *data)
+static int lan743x_otp_power_up(struct lan743x_adapter *adapter)
+{
+ u32 reg_value;
+
+ reg_value = lan743x_csr_read(adapter, OTP_PWR_DN);
+
+ if (reg_value & OTP_PWR_DN_PWRDN_N_) {
+ /* clear it and wait to be cleared */
+ reg_value &= ~OTP_PWR_DN_PWRDN_N_;
+ lan743x_csr_write(adapter, OTP_PWR_DN, reg_value);
+
+ usleep_range(100, 20000);
+ }
+
+ return 0;
+}
+
+static void lan743x_otp_power_down(struct lan743x_adapter *adapter)
+{
+ u32 reg_value;
+
+ reg_value = lan743x_csr_read(adapter, OTP_PWR_DN);
+ if (!(reg_value & OTP_PWR_DN_PWRDN_N_)) {
+ /* set power down bit */
+ reg_value |= OTP_PWR_DN_PWRDN_N_;
+ lan743x_csr_write(adapter, OTP_PWR_DN, reg_value);
+ }
+}
+
+static void lan743x_otp_set_address(struct lan743x_adapter *adapter,
+ u32 address)
+{
+ lan743x_csr_write(adapter, OTP_ADDR_HIGH, (address >> 8) & 0x03);
+ lan743x_csr_write(adapter, OTP_ADDR_LOW, address & 0xFF);
+}
+
+static void lan743x_otp_read_go(struct lan743x_adapter *adapter)
+{
+ lan743x_csr_write(adapter, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+ lan743x_csr_write(adapter, OTP_CMD_GO, OTP_CMD_GO_GO_);
+}
+
+static int lan743x_otp_wait_till_not_busy(struct lan743x_adapter *adapter)
{
unsigned long timeout;
- u32 buf;
+ u32 reg_val;
+
+ timeout = jiffies + HZ;
+ do {
+ if (time_after(jiffies, timeout)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "Timeout on OTP_STATUS completion\n");
+ return -EIO;
+ }
+ udelay(1);
+ reg_val = lan743x_csr_read(adapter, OTP_STATUS);
+ } while (reg_val & OTP_STATUS_BUSY_);
+
+ return 0;
+}
+
+static int lan743x_otp_read(struct lan743x_adapter *adapter, u32 offset,
+ u32 length, u8 *data)
+{
+ int ret;
int i;
- buf = lan743x_csr_read(adapter, OTP_PWR_DN);
+ if (offset + length > MAX_OTP_SIZE)
+ return -EINVAL;
- if (buf & OTP_PWR_DN_PWRDN_N_) {
- /* clear it and wait to be cleared */
- lan743x_csr_write(adapter, OTP_PWR_DN, 0);
-
- timeout = jiffies + HZ;
- do {
- udelay(1);
- buf = lan743x_csr_read(adapter, OTP_PWR_DN);
- if (time_after(jiffies, timeout)) {
- netif_warn(adapter, drv, adapter->netdev,
- "timeout on OTP_PWR_DN completion\n");
- return -EIO;
- }
- } while (buf & OTP_PWR_DN_PWRDN_N_);
+ ret = lan743x_otp_power_up(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_otp_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < length; i++) {
+ lan743x_otp_set_address(adapter, offset + i);
+
+ lan743x_otp_read_go(adapter);
+ ret = lan743x_otp_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ data[i] = lan743x_csr_read(adapter, OTP_READ_DATA);
}
+ lan743x_otp_power_down(adapter);
+
+ return 0;
+}
+
+static int lan743x_otp_write(struct lan743x_adapter *adapter, u32 offset,
+ u32 length, u8 *data)
+{
+ int ret;
+ int i;
+
+ if (offset + length > MAX_OTP_SIZE)
+ return -EINVAL;
+
+ ret = lan743x_otp_power_up(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_otp_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+
/* set to BYTE program mode */
lan743x_csr_write(adapter, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
for (i = 0; i < length; i++) {
- lan743x_csr_write(adapter, OTP_ADDR1,
- ((offset + i) >> 8) &
- OTP_ADDR1_15_11_MASK_);
- lan743x_csr_write(adapter, OTP_ADDR2,
- ((offset + i) &
- OTP_ADDR2_10_3_MASK_));
+ lan743x_otp_set_address(adapter, offset + i);
+
lan743x_csr_write(adapter, OTP_PRGM_DATA, data[i]);
lan743x_csr_write(adapter, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
lan743x_csr_write(adapter, OTP_CMD_GO, OTP_CMD_GO_GO_);
- timeout = jiffies + HZ;
- do {
- udelay(1);
- buf = lan743x_csr_read(adapter, OTP_STATUS);
- if (time_after(jiffies, timeout)) {
- netif_warn(adapter, drv, adapter->netdev,
- "Timeout on OTP_STATUS completion\n");
- return -EIO;
- }
- } while (buf & OTP_STATUS_BUSY_);
+ ret = lan743x_otp_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
}
+ lan743x_otp_power_down(adapter);
+
return 0;
}
@@ -120,6 +197,9 @@ static int lan743x_eeprom_read(struct lan743x_adapter *adapter,
u32 val;
int i;
+ if (offset + length > MAX_EEPROM_SIZE)
+ return -EINVAL;
+
retval = lan743x_eeprom_confirm_not_busy(adapter);
if (retval)
return retval;
@@ -148,6 +228,9 @@ static int lan743x_eeprom_write(struct lan743x_adapter *adapter,
u32 val;
int i;
+ if (offset + length > MAX_EEPROM_SIZE)
+ return -EINVAL;
+
retval = lan743x_eeprom_confirm_not_busy(adapter);
if (retval)
return retval;
@@ -207,6 +290,11 @@ static void lan743x_ethtool_set_msglevel(struct net_device *netdev,
static int lan743x_ethtool_get_eeprom_len(struct net_device *netdev)
{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP)
+ return MAX_OTP_SIZE;
+
return MAX_EEPROM_SIZE;
}
@@ -214,8 +302,14 @@ static int lan743x_ethtool_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int ret = 0;
+
+ if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP)
+ ret = lan743x_otp_read(adapter, ee->offset, ee->len, data);
+ else
+ ret = lan743x_eeprom_read(adapter, ee->offset, ee->len, data);
- return lan743x_eeprom_read(adapter, ee->offset, ee->len, data);
+ return ret;
}
static int lan743x_ethtool_set_eeprom(struct net_device *netdev,
@@ -224,17 +318,18 @@ static int lan743x_ethtool_set_eeprom(struct net_device *netdev,
struct lan743x_adapter *adapter = netdev_priv(netdev);
int ret = -EINVAL;
- if (ee->magic == LAN743X_EEPROM_MAGIC)
- ret = lan743x_eeprom_write(adapter, ee->offset, ee->len,
- data);
- /* Beware! OTP is One Time Programming ONLY!
- * So do some strict condition check before messing up
- */
- else if ((ee->magic == LAN743X_OTP_MAGIC) &&
- (ee->offset == 0) &&
- (ee->len == MAX_EEPROM_SIZE) &&
- (data[0] == OTP_INDICATOR_1))
- ret = lan743x_otp_write(adapter, ee->offset, ee->len, data);
+ if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) {
+ /* Beware! OTP is One Time Programming ONLY! */
+ if (ee->magic == LAN743X_OTP_MAGIC) {
+ ret = lan743x_otp_write(adapter, ee->offset,
+ ee->len, data);
+ }
+ } else {
+ if (ee->magic == LAN743X_EEPROM_MAGIC) {
+ ret = lan743x_eeprom_write(adapter, ee->offset,
+ ee->len, data);
+ }
+ }
return ret;
}
@@ -360,6 +455,10 @@ static const u32 lan743x_set2_hw_cnt_addr[] = {
STAT_TX_COUNTER_ROLLOVER_STATUS
};
+static const char lan743x_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ "OTP_ACCESS",
+};
+
static void lan743x_ethtool_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
@@ -375,6 +474,10 @@ static void lan743x_ethtool_get_strings(struct net_device *netdev,
lan743x_set2_hw_cnt_strings,
sizeof(lan743x_set2_hw_cnt_strings));
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, lan743x_priv_flags_strings,
+ sizeof(lan743x_priv_flags_strings));
+ break;
}
}
@@ -399,6 +502,22 @@ static void lan743x_ethtool_get_ethtool_stats(struct net_device *netdev,
}
}
+static u32 lan743x_ethtool_get_priv_flags(struct net_device *netdev)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->flags;
+}
+
+static int lan743x_ethtool_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ adapter->flags = flags;
+
+ return 0;
+}
+
static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
@@ -411,6 +530,8 @@ static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
ret += ARRAY_SIZE(lan743x_set2_hw_cnt_strings);
return ret;
}
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(lan743x_priv_flags_strings);
default:
return -EOPNOTSUPP;
}
@@ -705,6 +826,8 @@ const struct ethtool_ops lan743x_ethtool_ops = {
.set_eeprom = lan743x_ethtool_set_eeprom,
.get_strings = lan743x_ethtool_get_strings,
.get_ethtool_stats = lan743x_ethtool_get_ethtool_stats,
+ .get_priv_flags = lan743x_ethtool_get_priv_flags,
+ .set_priv_flags = lan743x_ethtool_set_priv_flags,
.get_sset_count = lan743x_ethtool_get_sset_count,
.get_rxnfc = lan743x_ethtool_get_rxnfc,
.get_rxfh_key_size = lan743x_ethtool_get_rxfh_key_size,
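
Several of the new OTP helpers share one idiom, factored into lan743x_otp_wait_till_not_busy(): poll a status register until its BUSY bit clears, bounded by a jiffies deadline of HZ (one second). A user-space sketch of the same deadline loop; read_status() is a hypothetical callback standing in for the CSR read.

#include <errno.h>
#include <stdint.h>
#include <time.h>

#define DEMO_STATUS_BUSY (1u << 0)

static int demo_wait_not_busy(uint32_t (*read_status)(void))
{
        struct timespec now, deadline;

        clock_gettime(CLOCK_MONOTONIC, &deadline);
        deadline.tv_sec += 1;                   /* ~HZ in the driver */

        while (read_status() & DEMO_STATUS_BUSY) {
                clock_gettime(CLOCK_MONOTONIC, &now);
                if (now.tv_sec > deadline.tv_sec ||
                    (now.tv_sec == deadline.tv_sec &&
                     now.tv_nsec >= deadline.tv_nsec))
                        return -EIO;            /* timed out */
                /* brief back-off, analogous to udelay(1) */
                nanosleep(&(struct timespec){ .tv_nsec = 1000 }, NULL);
        }
        return 0;
}
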
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 2d6eea18973e..3b02eeae5f45 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -26,6 +26,8 @@
#define FPGA_REV_GET_MAJOR_(fpga_rev) ((fpga_rev) & 0x000000FF)
#define HW_CFG (0x010)
+#define HW_CFG_RELOAD_TYPE_ALL_ (0x00000FC0)
+#define HW_CFG_EE_OTP_RELOAD_ BIT(4)
#define HW_CFG_LRST_ BIT(1)
#define PMT_CTL (0x014)
@@ -453,17 +455,19 @@
#define OTP_PWR_DN (0x1000)
#define OTP_PWR_DN_PWRDN_N_ BIT(0)
-#define OTP_ADDR1 (0x1004)
-#define OTP_ADDR1_15_11_MASK_ (0x1F)
-
-#define OTP_ADDR2 (0x1008)
-#define OTP_ADDR2_10_3_MASK_ (0xFF)
+#define OTP_ADDR_HIGH (0x1004)
+#define OTP_ADDR_LOW (0x1008)
#define OTP_PRGM_DATA (0x1010)
#define OTP_PRGM_MODE (0x1014)
#define OTP_PRGM_MODE_BYTE_ BIT(0)
+#define OTP_READ_DATA (0x1018)
+
+#define OTP_FUNC_CMD (0x1020)
+#define OTP_FUNC_CMD_READ_ BIT(0)
+
#define OTP_TST_CMD (0x1024)
#define OTP_TST_CMD_PRGVRFY_ BIT(3)
@@ -713,6 +717,9 @@ struct lan743x_adapter {
struct lan743x_phy phy;
struct lan743x_tx tx[LAN743X_MAX_TX_CHANNELS];
struct lan743x_rx rx[LAN743X_MAX_RX_CHANNELS];
+
+#define LAN743X_ADAPTER_FLAG_OTP BIT(0)
+ u32 flags;
};
#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel))
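
The renamed OTP_ADDR_HIGH/OTP_ADDR_LOW registers reflect how lan743x_otp_set_address() splits the 10-bit OTP address (MAX_OTP_SIZE is 1024 bytes): two high bits and eight low bits. A trivial sketch of the split, with an invented demo_* name:

#include <assert.h>
#include <stdint.h>

static void demo_otp_split(uint32_t address, uint8_t *high, uint8_t *low)
{
        assert(address < 1024);         /* MAX_OTP_SIZE == 2^10 */
        *high = (address >> 8) & 0x03;  /* bits 9:8 -> OTP_ADDR_HIGH */
        *low  = address & 0xFF;         /* bits 7:0 -> OTP_ADDR_LOW */
}
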
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 215a45374d7b..c6a575eb0ff5 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -721,7 +721,8 @@ static void ocelot_get_stats64(struct net_device *dev,
static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr,
- u16 vid, u16 flags)
+ u16 vid, u16 flags,
+ struct netlink_ext_ack *extack)
{
struct ocelot_port *port = netdev_priv(dev);
struct ocelot *ocelot = port->ocelot;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 5ae3fa82909f..0da7393b2ef3 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2553,7 +2553,7 @@ static int vxge_add_isr(struct vxgedev *vdev)
vxge_debug_init(VXGE_ERR,
"%s: Defaulting to INTA",
vdev->ndev->name);
- goto INTA_MODE;
+ goto INTA_MODE;
}
msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index e23ca90289f7..aa3a2098a583 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1266,7 +1266,7 @@ wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
u64 imm = insn->imm; /* sign extend */
if (skip) {
- meta->skip = true;
+ meta->flags |= FLAG_INSN_SKIP_NOOP;
return 0;
}
@@ -1296,7 +1296,7 @@ wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
const struct bpf_insn *insn = &meta->insn;
if (skip) {
- meta->skip = true;
+ meta->flags |= FLAG_INSN_SKIP_NOOP;
return 0;
}
@@ -1334,8 +1334,9 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
insn->src_reg * 2, br_mask, insn->off);
- wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
- insn->src_reg * 2 + 1, br_mask, insn->off);
+ if (is_mbpf_jmp64(meta))
+ wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
+ insn->src_reg * 2 + 1, br_mask, insn->off);
return 0;
}
@@ -1390,13 +1391,15 @@ static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
else
emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));
- tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
- if (!code->swap)
- emit_alu(nfp_prog, reg_none(),
- reg_a(reg + 1), carry_op, tmp_reg);
- else
- emit_alu(nfp_prog, reg_none(),
- tmp_reg, carry_op, reg_a(reg + 1));
+ if (is_mbpf_jmp64(meta)) {
+ tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ if (!code->swap)
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(reg + 1), carry_op, tmp_reg);
+ else
+ emit_alu(nfp_prog, reg_none(),
+ tmp_reg, carry_op, reg_a(reg + 1));
+ }
emit_br(nfp_prog, code->br_mask, insn->off, 0);
@@ -1423,8 +1426,9 @@ static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
}
emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
- emit_alu(nfp_prog, reg_none(),
- reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
+ if (is_mbpf_jmp64(meta))
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
emit_br(nfp_prog, code->br_mask, insn->off, 0);
return 0;
@@ -3048,6 +3052,19 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+static int jeq32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ swreg tmp_reg;
+
+ tmp_reg = ur_load_imm_any(nfp_prog, insn->imm, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
+ emit_br(nfp_prog, BR_BEQ, insn->off, 0);
+
+ return 0;
+}
+
static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
@@ -3061,9 +3078,10 @@ static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
/* Upper word of the mask can only be 0 or ~0 from sign extension,
* so either ignore it or OR the whole thing in.
*/
- if (imm >> 32)
+ if (is_mbpf_jmp64(meta) && imm >> 32) {
emit_alu(nfp_prog, reg_none(),
reg_a(dst_gpr + 1), ALU_OP_OR, imm_b(nfp_prog));
+ }
emit_br(nfp_prog, BR_BNE, insn->off, 0);
return 0;
@@ -3073,11 +3091,16 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
+ bool is_jmp32 = is_mbpf_jmp32(meta);
swreg tmp_reg;
if (!imm) {
- emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
- ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
+ if (is_jmp32)
+ emit_alu(nfp_prog, reg_none(), reg_none(), ALU_OP_NONE,
+ reg_b(insn->dst_reg * 2));
+ else
+ emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
+ ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
emit_br(nfp_prog, BR_BNE, insn->off, 0);
return 0;
}
@@ -3087,6 +3110,9 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
emit_br(nfp_prog, BR_BNE, insn->off, 0);
+ if (is_jmp32)
+ return 0;
+
tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
emit_alu(nfp_prog, reg_none(),
reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
@@ -3101,10 +3127,13 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
ALU_OP_XOR, reg_b(insn->src_reg * 2));
- emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
- ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
- emit_alu(nfp_prog, reg_none(),
- imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
+ if (is_mbpf_jmp64(meta)) {
+ emit_alu(nfp_prog, imm_b(nfp_prog),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR,
+ reg_b(insn->src_reg * 2 + 1));
+ emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR,
+ imm_b(nfp_prog));
+ }
emit_br(nfp_prog, BR_BEQ, insn->off, 0);
return 0;
@@ -3182,7 +3211,7 @@ bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL);
} else {
ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
- emit_br(nfp_prog, BR_UNC, meta->n + 1 + meta->insn.imm, 1);
+ emit_br(nfp_prog, BR_UNC, meta->insn.imm, 1);
offset_br = nfp_prog_current_offset(nfp_prog);
}
wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL);
@@ -3369,6 +3398,28 @@ static const instr_cb_t instr_cb[256] = {
[BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg,
[BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
[BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
+ [BPF_JMP32 | BPF_JEQ | BPF_K] = jeq32_imm,
+ [BPF_JMP32 | BPF_JGT | BPF_K] = cmp_imm,
+ [BPF_JMP32 | BPF_JGE | BPF_K] = cmp_imm,
+ [BPF_JMP32 | BPF_JLT | BPF_K] = cmp_imm,
+ [BPF_JMP32 | BPF_JLE | BPF_K] = cmp_imm,
+ [BPF_JMP32 | BPF_JSGT | BPF_K] = cmp_imm,
+ [BPF_JMP32 | BPF_JSGE | BPF_K] = cmp_imm,
+ [BPF_JMP32 | BPF_JSLT | BPF_K] = cmp_imm,
+ [BPF_JMP32 | BPF_JSLE | BPF_K] = cmp_imm,
+ [BPF_JMP32 | BPF_JSET | BPF_K] = jset_imm,
+ [BPF_JMP32 | BPF_JNE | BPF_K] = jne_imm,
+ [BPF_JMP32 | BPF_JEQ | BPF_X] = jeq_reg,
+ [BPF_JMP32 | BPF_JGT | BPF_X] = cmp_reg,
+ [BPF_JMP32 | BPF_JGE | BPF_X] = cmp_reg,
+ [BPF_JMP32 | BPF_JLT | BPF_X] = cmp_reg,
+ [BPF_JMP32 | BPF_JLE | BPF_X] = cmp_reg,
+ [BPF_JMP32 | BPF_JSGT | BPF_X] = cmp_reg,
+ [BPF_JMP32 | BPF_JSGE | BPF_X] = cmp_reg,
+ [BPF_JMP32 | BPF_JSLT | BPF_X] = cmp_reg,
+ [BPF_JMP32 | BPF_JSLE | BPF_X] = cmp_reg,
+ [BPF_JMP32 | BPF_JSET | BPF_X] = jset_reg,
+ [BPF_JMP32 | BPF_JNE | BPF_X] = jne_reg,
[BPF_JMP | BPF_CALL] = call,
[BPF_JMP | BPF_EXIT] = jmp_exit,
};
@@ -3395,9 +3446,9 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
int err;
list_for_each_entry(meta, &nfp_prog->insns, l) {
- if (meta->skip)
+ if (meta->flags & FLAG_INSN_SKIP_MASK)
continue;
- if (BPF_CLASS(meta->insn.code) != BPF_JMP)
+ if (!is_mbpf_jmp(meta))
continue;
if (meta->insn.code == (BPF_JMP | BPF_EXIT) &&
!nfp_is_main_function(meta))
@@ -3439,7 +3490,7 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
jmp_dst = meta->jmp_dst;
- if (jmp_dst->skip) {
+ if (jmp_dst->flags & FLAG_INSN_SKIP_PREC_DEPENDENT) {
pr_err("Branch landing on removed instruction!!\n");
return -ELOOP;
}
@@ -3689,7 +3740,7 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
return nfp_prog->error;
}
- if (meta->skip) {
+ if (meta->flags & FLAG_INSN_SKIP_MASK) {
nfp_prog->n_translated++;
continue;
}
@@ -3737,10 +3788,10 @@ static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
/* Programs start with R6 = R1 but we ignore the skb pointer */
if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
insn.src_reg == 1 && insn.dst_reg == 6)
- meta->skip = true;
+ meta->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
/* Return as soon as something doesn't match */
- if (!meta->skip)
+ if (!(meta->flags & FLAG_INSN_SKIP_MASK))
return;
}
}
@@ -3755,19 +3806,17 @@ static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
list_for_each_entry(meta, &nfp_prog->insns, l) {
struct bpf_insn insn = meta->insn;
- if (meta->skip)
+ if (meta->flags & FLAG_INSN_SKIP_MASK)
continue;
- if (BPF_CLASS(insn.code) != BPF_ALU &&
- BPF_CLASS(insn.code) != BPF_ALU64 &&
- BPF_CLASS(insn.code) != BPF_JMP)
+ if (!is_mbpf_alu(meta) && !is_mbpf_jmp(meta))
continue;
if (BPF_SRC(insn.code) != BPF_K)
continue;
if (insn.imm >= 0)
continue;
- if (BPF_CLASS(insn.code) == BPF_JMP) {
+ if (is_mbpf_jmp(meta)) {
switch (BPF_OP(insn.code)) {
case BPF_JGE:
case BPF_JSGE:
@@ -3829,7 +3878,7 @@ static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
continue;
- meta2->skip = true;
+ meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
}
}
@@ -3869,8 +3918,8 @@ static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
meta3->flags & FLAG_INSN_IS_JUMP_DST)
continue;
- meta2->skip = true;
- meta3->skip = true;
+ meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
+ meta3->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
}
}
@@ -4065,7 +4114,8 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
}
head_ld_meta->paired_st = &head_st_meta->insn;
- head_st_meta->skip = true;
+ head_st_meta->flags |=
+ FLAG_INSN_SKIP_PREC_DEPENDENT;
} else {
head_ld_meta->ldst_gather_len = 0;
}
@@ -4098,8 +4148,8 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
head_ld_meta = meta1;
head_st_meta = meta2;
} else {
- meta1->skip = true;
- meta2->skip = true;
+ meta1->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
+ meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
}
head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
@@ -4124,7 +4174,7 @@ static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
if (meta->flags & FLAG_INSN_IS_JUMP_DST)
cache_avail = false;
- if (meta->skip)
+ if (meta->flags & FLAG_INSN_SKIP_MASK)
continue;
insn = &meta->insn;
@@ -4210,7 +4260,7 @@ start_new:
}
list_for_each_entry(meta, &nfp_prog->insns, l) {
- if (meta->skip)
+ if (meta->flags & FLAG_INSN_SKIP_MASK)
continue;
if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) {
@@ -4246,7 +4296,8 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
u32 id;
nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
- if (meta1->skip || meta2->skip)
+ if (meta1->flags & FLAG_INSN_SKIP_MASK ||
+ meta2->flags & FLAG_INSN_SKIP_MASK)
continue;
if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) ||
@@ -4325,7 +4376,7 @@ int nfp_bpf_jit(struct nfp_prog *nfp_prog)
return ret;
}
-void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
+void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta;
@@ -4336,7 +4387,7 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
unsigned int dst_idx;
bool pseudo_call;
- if (BPF_CLASS(code) != BPF_JMP)
+ if (!is_mbpf_jmp(meta))
continue;
if (BPF_OP(code) == BPF_EXIT)
continue;
@@ -4353,7 +4404,7 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
else
dst_idx = meta->n + 1 + meta->insn.off;
- dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx, cnt);
+ dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx);
if (pseudo_call)
dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START;
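The pattern across these hunks: the JMP32 opcodes reuse the 64-bit code generators, but every instruction that touches the upper 32-bit GPR is now gated on is_mbpf_jmp64(). The semantics being JITed, as an illustrative C sketch (not driver code):

	/* Illustrative only: an eBPF 64-bit register spans two 32-bit NFP
	 * GPRs.  JMP32 compares just the low GPR, so the carry/borrow
	 * step over the high GPR is emitted only for the BPF_JMP class.
	 */
	static bool jge_imm_jmp32(u64 dst, u32 imm)
	{
		return (u32)dst >= imm;		/* low word only */
	}

	static bool jge_imm_jmp64(u64 dst, u64 imm)
	{
		return dst >= imm;		/* both words, with carry */
	}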
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 941277936475..b25a48218bcf 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -243,6 +243,16 @@ struct nfp_bpf_reg_state {
#define FLAG_INSN_IS_JUMP_DST BIT(0)
#define FLAG_INSN_IS_SUBPROG_START BIT(1)
#define FLAG_INSN_PTR_CALLER_STACK_FRAME BIT(2)
+/* Instruction is pointless, noop even on its own */
+#define FLAG_INSN_SKIP_NOOP BIT(3)
+/* Instruction is optimized out based on preceding instructions */
+#define FLAG_INSN_SKIP_PREC_DEPENDENT BIT(4)
+/* Instruction is optimized by the verifier */
+#define FLAG_INSN_SKIP_VERIFIER_OPT BIT(5)
+
+#define FLAG_INSN_SKIP_MASK (FLAG_INSN_SKIP_NOOP | \
+ FLAG_INSN_SKIP_PREC_DEPENDENT | \
+ FLAG_INSN_SKIP_VERIFIER_OPT)
/**
* struct nfp_insn_meta - BPF instruction wrapper
@@ -271,7 +281,6 @@ struct nfp_bpf_reg_state {
* @n: eBPF instruction number
* @flags: eBPF instruction extra optimization flags
* @subprog_idx: index of subprogram to which the instruction belongs
- * @skip: skip this instruction (optimized out)
* @double_cb: callback for second part of the instruction
* @l: link on nfp_prog->insns list
*/
@@ -319,7 +328,6 @@ struct nfp_insn_meta {
unsigned short n;
unsigned short flags;
unsigned short subprog_idx;
- bool skip;
instr_cb_t double_cb;
struct list_head l;
@@ -357,6 +365,21 @@ static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
}
+static inline bool is_mbpf_jmp32(const struct nfp_insn_meta *meta)
+{
+ return mbpf_class(meta) == BPF_JMP32;
+}
+
+static inline bool is_mbpf_jmp64(const struct nfp_insn_meta *meta)
+{
+ return mbpf_class(meta) == BPF_JMP;
+}
+
+static inline bool is_mbpf_jmp(const struct nfp_insn_meta *meta)
+{
+ return is_mbpf_jmp32(meta) || is_mbpf_jmp64(meta);
+}
+
static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
{
return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
@@ -407,6 +430,20 @@ static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
}
+static inline bool is_mbpf_cond_jump(const struct nfp_insn_meta *meta)
+{
+ u8 op;
+
+ if (is_mbpf_jmp32(meta))
+ return true;
+
+ if (!is_mbpf_jmp64(meta))
+ return false;
+
+ op = mbpf_op(meta);
+ return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
+}
+
static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta)
{
struct bpf_insn insn = meta->insn;
@@ -457,6 +494,7 @@ struct nfp_bpf_subprog_info {
* @subprog_cnt: number of sub-programs, including main function
* @map_records: the map record pointers from bpf->maps_neutral
* @subprog: pointer to an array of objects holding info about sub-programs
+ * @n_insns: number of instructions on @insns list
* @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
*/
struct nfp_prog {
@@ -489,6 +527,7 @@ struct nfp_prog {
struct nfp_bpf_neutral_map **map_records;
struct nfp_bpf_subprog_info *subprog;
+ unsigned int n_insns;
struct list_head insns;
};
@@ -505,7 +544,7 @@ struct nfp_bpf_vnic {
};
bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
-void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
+void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);
@@ -513,6 +552,10 @@ int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
int prev_insn_idx);
int nfp_bpf_finalize(struct bpf_verifier_env *env);
+int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
+ struct bpf_insn *insn);
+int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
+
extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops;
struct netdev_bpf;
@@ -526,7 +569,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
- unsigned int insn_idx, unsigned int n_insns);
+ unsigned int insn_idx);
void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);
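With the bool gone, "is this instruction skipped at all" (test against FLAG_INSN_SKIP_MASK) and "why was it skipped" (test an individual bit) become separate questions on the same field. A hypothetical accessor, not in the patch, spells out the common case:

	/* Hypothetical helper: any of the three skip reasons means the
	 * instruction emits no NFP code.
	 */
	static inline bool nfp_insn_is_skipped(const struct nfp_insn_meta *meta)
	{
		return meta->flags & FLAG_INSN_SKIP_MASK;
	}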
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index f0283854fade..55c7dbf8b421 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -163,8 +163,9 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
list_add_tail(&meta->l, &nfp_prog->insns);
}
+ nfp_prog->n_insns = cnt;
- nfp_bpf_jit_prepare(nfp_prog, cnt);
+ nfp_bpf_jit_prepare(nfp_prog);
return 0;
}
@@ -219,6 +220,10 @@ static int nfp_bpf_translate(struct bpf_prog *prog)
unsigned int max_instr;
int err;
+ /* We depend on dead code elimination succeeding */
+ if (prog->aux->offload->opt_failed)
+ return -EINVAL;
+
max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
@@ -591,6 +596,8 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
const struct bpf_prog_offload_ops nfp_bpf_dev_ops = {
.insn_hook = nfp_verify_insn,
.finalize = nfp_bpf_finalize,
+ .replace_insn = nfp_bpf_opt_replace_insn,
+ .remove_insns = nfp_bpf_opt_remove_insns,
.prepare = nfp_bpf_verifier_prep,
.translate = nfp_bpf_translate,
.destroy = nfp_bpf_destroy,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 337bb862ec1d..36f56eb4cbe2 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -18,15 +18,15 @@
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
- unsigned int insn_idx, unsigned int n_insns)
+ unsigned int insn_idx)
{
unsigned int forward, backward, i;
backward = meta->n - insn_idx;
forward = insn_idx - meta->n;
- if (min(forward, backward) > n_insns - insn_idx - 1) {
- backward = n_insns - insn_idx - 1;
+ if (min(forward, backward) > nfp_prog->n_insns - insn_idx - 1) {
+ backward = nfp_prog->n_insns - insn_idx - 1;
meta = nfp_prog_last_meta(nfp_prog);
}
if (min(forward, backward) > insn_idx && backward > insn_idx) {
@@ -629,7 +629,7 @@ int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
- meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
+ meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx);
nfp_prog->verifier_meta = meta;
if (!nfp_bpf_supported_opcode(meta->insn.code)) {
@@ -690,8 +690,7 @@ nfp_assign_subprog_idx_and_regs(struct bpf_verifier_env *env,
return 0;
}
-static unsigned int
-nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog, unsigned int cnt)
+static unsigned int nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta = nfp_prog_first_meta(nfp_prog);
unsigned int max_depth = 0, depth = 0, frame = 0;
@@ -726,7 +725,7 @@ continue_subprog:
/* Find the callee and start processing it. */
meta = nfp_bpf_goto_meta(nfp_prog, meta,
- meta->n + 1 + meta->insn.imm, cnt);
+ meta->n + 1 + meta->insn.imm);
idx = meta->subprog_idx;
frame++;
goto process_subprog;
@@ -778,8 +777,7 @@ int nfp_bpf_finalize(struct bpf_verifier_env *env)
nn = netdev_priv(env->prog->aux->offload->netdev);
max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
- nfp_prog->stack_size = nfp_bpf_get_stack_usage(nfp_prog,
- env->prog->len);
+ nfp_prog->stack_size = nfp_bpf_get_stack_usage(nfp_prog);
if (nfp_prog->stack_size > max_stack) {
pr_vlog(env, "stack too large: program %dB > FW stack %dB\n",
nfp_prog->stack_size, max_stack);
@@ -788,3 +786,61 @@ int nfp_bpf_finalize(struct bpf_verifier_env *env)
return 0;
}
+
+int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
+ struct bpf_insn *insn)
+{
+ struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
+ struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+ struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
+
+ meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx);
+ nfp_prog->verifier_meta = meta;
+
+ /* conditional jump to jump conversion */
+ if (is_mbpf_cond_jump(meta) &&
+ insn->code == (BPF_JMP | BPF_JA | BPF_K)) {
+ unsigned int tgt_off;
+
+ tgt_off = off + insn->off + 1;
+
+ if (!insn->off) {
+ meta->jmp_dst = list_next_entry(meta, l);
+ meta->jump_neg_op = false;
+ } else if (meta->jmp_dst->n != aux_data[tgt_off].orig_idx) {
+ pr_vlog(env, "branch hard wire at %d changes target %d -> %d\n",
+ off, meta->jmp_dst->n,
+ aux_data[tgt_off].orig_idx);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ pr_vlog(env, "unsupported instruction replacement %hhx -> %hhx\n",
+ meta->insn.code, insn->code);
+ return -EINVAL;
+}
+
+int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
+{
+ struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
+ struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+ struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
+ unsigned int i;
+
+ meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx);
+
+ for (i = 0; i < cnt; i++) {
+ if (WARN_ON_ONCE(&meta->l == &nfp_prog->insns))
+ return -EINVAL;
+
+ /* doesn't count if it already has the flag */
+ if (meta->flags & FLAG_INSN_SKIP_VERIFIER_OPT)
+ i--;
+
+ meta->flags |= FLAG_INSN_SKIP_VERIFIER_OPT;
+ meta = list_next_entry(meta, l);
+ }
+
+ return 0;
+}
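The i-- in nfp_bpf_opt_remove_insns() makes cnt count newly removed instructions only: metas already flagged by an earlier removal pass don't consume the budget, so the walk advances further down the list. A standalone model of that counting (sketch, plain userspace C):

	#include <stdio.h>

	#define SKIP 0x1

	int main(void)
	{
		/* five metas; entry 1 was removed by an earlier call */
		unsigned int flags[5] = { 0, SKIP, 0, 0, 0 };
		unsigned int cnt = 2, walked = 0;

		for (unsigned int i = 0; i < cnt; i++, walked++) {
			if (flags[walked] & SKIP)
				i--;	/* already gone, doesn't count */
			flags[walked] |= SKIP;
		}
		/* prints: walked 3 metas to drop 2 insns */
		printf("walked %u metas to drop %u insns\n", walked, cnt);
		return 0;
	}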
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index 4c5eaf36d5bb..56b22ea32474 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -203,7 +203,7 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
}
atomic_inc(&priv->reify_replies);
- wake_up_interruptible(&priv->reify_wait_queue);
+ wake_up(&priv->reify_wait_queue);
}
static void
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 15f41cfef9f1..4fcaf11ed56e 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -97,6 +97,9 @@
#define NFP_FLOWER_WORKQ_MAX_SKBS 30000
+/* Cmsg reply (empirical) timeout */
+#define NFP_FL_REPLY_TIMEOUT msecs_to_jiffies(40)
+
#define nfp_flower_cmsg_warn(app, fmt, args...) \
do { \
if (net_ratelimit()) \
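The 40 ms constant pairs with the cmsg.c hunk above that switches the waker from wake_up_interruptible() to wake_up(): wait_event_timeout() sleeps in TASK_UNINTERRUPTIBLE, and wake_up_interruptible() only wakes TASK_INTERRUPTIBLE sleepers, so the old waker could miss the new waiter. The pairing, condensed from the hunks in this series:

	/* waiter (nfp_flower_wait_repr_reify()): non-interruptible sleep */
	if (!wait_event_timeout(priv->reify_wait_queue,
				atomic_read(replies) >= tot_repl,
				NFP_FL_REPLY_TIMEOUT))
		return -EIO;	/* no reply within 40 ms */

	/* waker (cmsg RX path): must be wake_up(), not _interruptible */
	atomic_inc(&priv->reify_replies);
	wake_up(&priv->reify_wait_queue);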
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 5059110a1768..408089133599 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -32,6 +32,71 @@ static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}
+static struct nfp_flower_non_repr_priv *
+nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_flower_non_repr_priv *entry;
+
+ ASSERT_RTNL();
+
+ list_for_each_entry(entry, &priv->non_repr_priv, list)
+ if (entry->netdev == netdev)
+ return entry;
+
+ return NULL;
+}
+
+void
+__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv)
+{
+ non_repr_priv->ref_count++;
+}
+
+struct nfp_flower_non_repr_priv *
+nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_flower_non_repr_priv *entry;
+
+ entry = nfp_flower_non_repr_priv_lookup(app, netdev);
+ if (entry)
+ goto inc_ref;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+
+ entry->netdev = netdev;
+ list_add(&entry->list, &priv->non_repr_priv);
+
+inc_ref:
+ __nfp_flower_non_repr_priv_get(entry);
+ return entry;
+}
+
+void
+__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv)
+{
+ if (--non_repr_priv->ref_count)
+ return;
+
+ list_del(&non_repr_priv->list);
+ kfree(non_repr_priv);
+}
+
+void
+nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_flower_non_repr_priv *entry;
+
+ entry = nfp_flower_non_repr_priv_lookup(app, netdev);
+ if (!entry)
+ return;
+
+ __nfp_flower_non_repr_priv_put(entry);
+}
+
static enum nfp_repr_type
nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
{
@@ -107,16 +172,14 @@ static int
nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
{
struct nfp_flower_priv *priv = app->priv;
- int err;
if (!tot_repl)
return 0;
lockdep_assert_held(&app->pf->lock);
- err = wait_event_interruptible_timeout(priv->reify_wait_queue,
- atomic_read(replies) >= tot_repl,
- msecs_to_jiffies(10));
- if (err <= 0) {
+ if (!wait_event_timeout(priv->reify_wait_queue,
+ atomic_read(replies) >= tot_repl,
+ NFP_FL_REPLY_TIMEOUT)) {
nfp_warn(app->cpp, "Not all reprs responded to reify\n");
return -EIO;
}
@@ -223,6 +286,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
nfp_repr = netdev_priv(repr);
nfp_repr->app_priv = repr_priv;
+ repr_priv->nfp_repr = nfp_repr;
/* For now we only support 1 PF */
WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);
@@ -337,6 +401,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
nfp_repr = netdev_priv(repr);
nfp_repr->app_priv = repr_priv;
+ repr_priv->nfp_repr = nfp_repr;
port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
if (IS_ERR(port)) {
@@ -476,8 +541,8 @@ err_clear_nn:
static int nfp_flower_init(struct nfp_app *app)
{
+ u64 version, features, ctx_count, num_mems;
const struct nfp_pf *pf = app->pf;
- u64 version, features, ctx_count;
struct nfp_flower_priv *app_priv;
int err;
@@ -502,6 +567,23 @@ static int nfp_flower_init(struct nfp_app *app)
return err;
}
+ num_mems = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_SPLIT",
+ &err);
+ if (err) {
+ nfp_warn(app->cpp,
+ "FlowerNIC: unsupported host context memory: %d\n",
+ err);
+ err = 0;
+ num_mems = 1;
+ }
+
+ if (!FIELD_FIT(NFP_FL_STAT_ID_MU_NUM, num_mems) || !num_mems) {
+ nfp_warn(app->cpp,
+ "FlowerNIC: invalid host context memory: %llu\n",
+ num_mems);
+ return -EINVAL;
+ }
+
ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
&err);
if (err) {
@@ -522,6 +604,8 @@ static int nfp_flower_init(struct nfp_app *app)
if (!app_priv)
return -ENOMEM;
+ app_priv->total_mem_units = num_mems;
+ app_priv->active_mem_unit = 0;
app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
app->priv = app_priv;
app_priv->app = app;
@@ -533,7 +617,7 @@ static int nfp_flower_init(struct nfp_app *app)
init_waitqueue_head(&app_priv->mtu_conf.wait_q);
spin_lock_init(&app_priv->mtu_conf.lock);
- err = nfp_flower_metadata_init(app, ctx_count);
+ err = nfp_flower_metadata_init(app, ctx_count, num_mems);
if (err)
goto err_free_app_priv;
@@ -558,6 +642,7 @@ static int nfp_flower_init(struct nfp_app *app)
}
INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
+ INIT_LIST_HEAD(&app_priv->non_repr_priv);
return 0;
@@ -601,7 +686,7 @@ nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
{
struct nfp_flower_priv *app_priv = app->priv;
struct nfp_repr *repr = netdev_priv(netdev);
- int err, ack;
+ int err;
/* Only need to config FW for physical port MTU change. */
if (repr->port->type != NFP_PORT_PHYS_PORT)
@@ -628,11 +713,9 @@ nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
}
/* Wait for fw to ack the change. */
- ack = wait_event_timeout(app_priv->mtu_conf.wait_q,
- nfp_flower_check_ack(app_priv),
- msecs_to_jiffies(10));
-
- if (!ack) {
+ if (!wait_event_timeout(app_priv->mtu_conf.wait_q,
+ nfp_flower_check_ack(app_priv),
+ NFP_FL_REPLY_TIMEOUT)) {
spin_lock_bh(&app_priv->mtu_conf.lock);
app_priv->mtu_conf.requested_val = 0;
spin_unlock_bh(&app_priv->mtu_conf.lock);
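nfp_flower_non_repr_priv_get()/..._put() implement classic lookup-or-allocate refcounting over priv->non_repr_priv. A sketch of the intended calling pattern (RTNL must be held; the lookup runs under ASSERT_RTNL()):

	struct nfp_flower_non_repr_priv *p;

	p = nfp_flower_non_repr_priv_get(app, netdev);	/* alloc or ref++ */
	if (!p)
		return -ENOMEM;

	/* ... record state in p, e.g. p->mac_offloaded ... */

	nfp_flower_non_repr_priv_put(app, netdev);	/* ref--, free at 0 */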
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index b858bac47621..c0945a5fd1a4 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -20,6 +20,9 @@ struct nfp_fl_pre_lag;
struct net_device;
struct nfp_app;
+#define NFP_FL_STAT_ID_MU_NUM GENMASK(31, 22)
+#define NFP_FL_STAT_ID_STAT GENMASK(21, 0)
+
#define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \
init_unalloc)
#define NFP_FLOWER_MASK_ENTRY_RS 256
@@ -54,6 +57,26 @@ struct nfp_fl_stats_id {
};
/**
+ * struct nfp_fl_tunnel_offloads - priv data for tunnel offloads
+ * @offloaded_macs: Hashtable of the offloaded MAC addresses
+ * @ipv4_off_list: List of IPv4 addresses to offload
+ * @neigh_off_list: List of neighbour offloads
+ * @ipv4_off_lock: Lock for the IPv4 address list
+ * @neigh_off_lock: Lock for the neighbour address list
+ * @mac_off_ids: IDA to manage id assignment for offloaded MACs
+ * @neigh_nb: Notifier to monitor neighbour state
+ */
+struct nfp_fl_tunnel_offloads {
+ struct rhashtable offloaded_macs;
+ struct list_head ipv4_off_list;
+ struct list_head neigh_off_list;
+ struct mutex ipv4_off_lock;
+ spinlock_t neigh_off_lock;
+ struct ida mac_off_ids;
+ struct notifier_block neigh_nb;
+};
+
+/**
* struct nfp_mtu_conf - manage MTU setting
* @portnum: NFP port number of repr with requested MTU change
* @requested_val: MTU value requested for repr
@@ -113,23 +136,16 @@ struct nfp_fl_lag {
* processing
* @cmsg_skbs_low: List of lower priority skbs for control message
* processing
- * @nfp_mac_off_list: List of MAC addresses to offload
- * @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs
- * @nfp_ipv4_off_list: List of IPv4 addresses to offload
- * @nfp_neigh_off_list: List of neighbour offloads
- * @nfp_mac_off_lock: Lock for the MAC address list
- * @nfp_mac_index_lock: Lock for the MAC index list
- * @nfp_ipv4_off_lock: Lock for the IPv4 address list
- * @nfp_neigh_off_lock: Lock for the neighbour address list
- * @nfp_mac_off_ids: IDA to manage id assignment for offloaded macs
- * @nfp_mac_off_count: Number of MACs in address list
- * @nfp_tun_neigh_nb: Notifier to monitor neighbour state
+ * @tun: Tunnel offload data
* @reify_replies: atomically stores the number of replies received
* from firmware for repr reify
* @reify_wait_queue: wait queue for repr reify response counting
* @mtu_conf: Configuration of repr MTU value
* @nfp_lag: Link aggregation data block
* @indr_block_cb_priv: List of priv data passed to indirect block cbs
+ * @non_repr_priv: List of offloaded non-repr ports and their priv data
+ * @active_mem_unit: Current active memory unit for flower rules
+ * @total_mem_units: Total number of available memory units for flower rules
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -147,30 +163,47 @@ struct nfp_flower_priv {
struct work_struct cmsg_work;
struct sk_buff_head cmsg_skbs_high;
struct sk_buff_head cmsg_skbs_low;
- struct list_head nfp_mac_off_list;
- struct list_head nfp_mac_index_list;
- struct list_head nfp_ipv4_off_list;
- struct list_head nfp_neigh_off_list;
- struct mutex nfp_mac_off_lock;
- struct mutex nfp_mac_index_lock;
- struct mutex nfp_ipv4_off_lock;
- spinlock_t nfp_neigh_off_lock;
- struct ida nfp_mac_off_ids;
- int nfp_mac_off_count;
- struct notifier_block nfp_tun_neigh_nb;
+ struct nfp_fl_tunnel_offloads tun;
atomic_t reify_replies;
wait_queue_head_t reify_wait_queue;
struct nfp_mtu_conf mtu_conf;
struct nfp_fl_lag nfp_lag;
struct list_head indr_block_cb_priv;
+ struct list_head non_repr_priv;
+ unsigned int active_mem_unit;
+ unsigned int total_mem_units;
};
/**
* struct nfp_flower_repr_priv - Flower APP per-repr priv data
+ * @nfp_repr: Back pointer to nfp_repr
* @lag_port_flags: Extended port flags to record lag state of repr
+ * @mac_offloaded: Flag indicating a MAC address is offloaded for repr
+ * @offloaded_mac_addr: MAC address that has been offloaded for repr
+ * @mac_list: List entry of reprs that share the same offloaded MAC
*/
struct nfp_flower_repr_priv {
+ struct nfp_repr *nfp_repr;
unsigned long lag_port_flags;
+ bool mac_offloaded;
+ u8 offloaded_mac_addr[ETH_ALEN];
+ struct list_head mac_list;
+};
+
+/**
+ * struct nfp_flower_non_repr_priv - Priv data for non-repr offloaded ports
+ * @list: List entry of offloaded reprs
+ * @netdev: Pointer to non-repr net_device
+ * @ref_count: Number of references held for this priv data
+ * @mac_offloaded: Flag indicating a MAC address is offloaded for device
+ * @offloaded_mac_addr: MAC address that has been offloaded for dev
+ */
+struct nfp_flower_non_repr_priv {
+ struct list_head list;
+ struct net_device *netdev;
+ int ref_count;
+ bool mac_offloaded;
+ u8 offloaded_mac_addr[ETH_ALEN];
};
struct nfp_fl_key_ls {
@@ -217,7 +250,8 @@ struct nfp_fl_stats_frame {
__be64 stats_cookie;
};
-int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count);
+int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
+ unsigned int host_ctx_split);
void nfp_flower_metadata_cleanup(struct nfp_app *app);
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
@@ -252,7 +286,6 @@ void nfp_tunnel_config_stop(struct nfp_app *app);
int nfp_tunnel_mac_event_handler(struct nfp_app *app,
struct net_device *netdev,
unsigned long event, void *ptr);
-void nfp_tunnel_write_macs(struct nfp_app *app);
void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
@@ -273,4 +306,12 @@ int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
struct net_device *netdev,
unsigned long event);
+void
+__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv);
+struct nfp_flower_non_repr_priv *
+nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev);
+void
+__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv);
+void
+nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev);
#endif
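The two GENMASKs split a 32-bit stats context id into a memory-unit number (bits 31:22) and a per-unit index (bits 21:0). A worked example of composing one (sketch):

	u32 id = FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, 2) |	/* memory unit 2 */
		 FIELD_PREP(NFP_FL_STAT_ID_STAT, 100);	/* index 100 */
	/* id == (2 << 22) | 100 == 0x00800064 */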
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index cdf75595f627..c04a0d6b0184 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -403,9 +403,6 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
ext += sizeof(struct nfp_flower_ipv4_udp_tun);
msk += sizeof(struct nfp_flower_ipv4_udp_tun);
- /* Configure tunnel end point MAC. */
- nfp_tunnel_write_macs(app);
-
/* Store the tunnel destination in the rule data.
* This must be present and be an exact match.
*/
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 573a4400a26c..492837b852b6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -4,6 +4,7 @@
#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
+#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>
@@ -52,8 +53,17 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
freed_stats_id = priv->stats_ring_size;
/* Check for unallocated entries first. */
if (priv->stats_ids.init_unalloc > 0) {
- *stats_context_id = priv->stats_ids.init_unalloc - 1;
- priv->stats_ids.init_unalloc--;
+ if (priv->active_mem_unit == priv->total_mem_units) {
+ priv->stats_ids.init_unalloc--;
+ priv->active_mem_unit = 0;
+ }
+
+ *stats_context_id =
+ FIELD_PREP(NFP_FL_STAT_ID_STAT,
+ priv->stats_ids.init_unalloc - 1) |
+ FIELD_PREP(NFP_FL_STAT_ID_MU_NUM,
+ priv->active_mem_unit);
+ priv->active_mem_unit++;
return 0;
}
@@ -381,10 +391,11 @@ const struct rhashtable_params nfp_flower_table_params = {
.automatic_shrinking = true,
};
-int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count)
+int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
+ unsigned int host_num_mems)
{
struct nfp_flower_priv *priv = app->priv;
- int err;
+ int err, stats_size;
hash_init(priv->mask_table);
@@ -417,10 +428,12 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count)
if (!priv->stats_ids.free_list.buf)
goto err_free_last_used;
- priv->stats_ids.init_unalloc = host_ctx_count;
+ priv->stats_ids.init_unalloc = div_u64(host_ctx_count, host_num_mems);
- priv->stats = kvmalloc_array(priv->stats_ring_size,
- sizeof(struct nfp_fl_stats), GFP_KERNEL);
+ stats_size = FIELD_PREP(NFP_FL_STAT_ID_STAT, host_ctx_count) |
+ FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, host_num_mems - 1);
+ priv->stats = kvmalloc_array(stats_size, sizeof(struct nfp_fl_stats),
+ GFP_KERNEL);
if (!priv->stats)
goto err_free_ring_buf;
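Each memory unit therefore hands out an equal share of the firmware's host contexts, and the composed id tells firmware which unit a context lives in. Worked numbers (illustrative values, not from the patch):

	/* With CONFIG_FC_HOST_CTX_COUNT = 0x20000 and two memory units:
	 *   init_unalloc = div_u64(0x20000, 2)        = 0x10000 per unit
	 *   stats_size   = FIELD_PREP(NFP_FL_STAT_ID_STAT, 0x20000)
	 *                | FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, 2 - 1)
	 *                = 0x00420000 entries
	 * so any composed (mu_num, stat) id indexes priv->stats in bounds.
	 */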
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 2d9f26a725c2..4d78be4ec4e9 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -98,47 +98,51 @@ struct nfp_ipv4_addr_entry {
struct list_head list;
};
-/**
- * struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP
- * @reserved: reserved for future use
- * @count: number of MAC addresses in the message
- * @addresses.index: index of MAC address in the lookup table
- * @addresses.addr: interface MAC address
- * @addresses: series of MACs to offload
- */
-struct nfp_tun_mac_addr {
- __be16 reserved;
- __be16 count;
- struct index_mac_addr {
- __be16 index;
- u8 addr[ETH_ALEN];
- } addresses[];
-};
+#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG 0x2
/**
- * struct nfp_tun_mac_offload_entry - list of MACs to offload
- * @index: index of MAC address for offloading
+ * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
+ * @flags: MAC address offload options
+ * @count: number of MAC addresses in the message (should be 1)
+ * @index: index of MAC address in the lookup table
* @addr: interface MAC address
- * @list: list pointer
*/
-struct nfp_tun_mac_offload_entry {
+struct nfp_tun_mac_addr_offload {
+ __be16 flags;
+ __be16 count;
__be16 index;
u8 addr[ETH_ALEN];
- struct list_head list;
+};
+
+enum nfp_flower_mac_offload_cmd {
+ NFP_TUNNEL_MAC_OFFLOAD_ADD = 0,
+ NFP_TUNNEL_MAC_OFFLOAD_DEL = 1,
+ NFP_TUNNEL_MAC_OFFLOAD_MOD = 2,
};
#define NFP_MAX_MAC_INDEX 0xff
/**
- * struct nfp_tun_mac_non_nfp_idx - converts non NFP netdev ifindex to 8-bit id
- * @ifindex: netdev ifindex of the device
- * @index: index of netdevs mac on NFP
- * @list: list pointer
+ * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
+ * @ht_node: Hashtable entry
+ * @addr: Offloaded MAC address
+ * @index: Offloaded index for given MAC address
+ * @ref_count: Number of devs using this MAC address
+ * @repr_list: List of reprs sharing this MAC address
*/
-struct nfp_tun_mac_non_nfp_idx {
- int ifindex;
- u8 index;
- struct list_head list;
+struct nfp_tun_offloaded_mac {
+ struct rhash_head ht_node;
+ u8 addr[ETH_ALEN];
+ u16 index;
+ int ref_count;
+ struct list_head repr_list;
+};
+
+static const struct rhashtable_params offloaded_macs_params = {
+ .key_offset = offsetof(struct nfp_tun_offloaded_mac, addr),
+ .head_offset = offsetof(struct nfp_tun_offloaded_mac, ht_node),
+ .key_len = ETH_ALEN,
+ .automatic_shrinking = true,
};
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
@@ -205,15 +209,15 @@ static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
struct nfp_ipv4_route_entry *entry;
struct list_head *ptr, *storage;
- spin_lock_bh(&priv->nfp_neigh_off_lock);
- list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ spin_lock_bh(&priv->tun.neigh_off_lock);
+ list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
if (entry->ipv4_addr == ipv4_addr) {
- spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ spin_unlock_bh(&priv->tun.neigh_off_lock);
return true;
}
}
- spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ spin_unlock_bh(&priv->tun.neigh_off_lock);
return false;
}
@@ -223,24 +227,24 @@ static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
struct nfp_ipv4_route_entry *entry;
struct list_head *ptr, *storage;
- spin_lock_bh(&priv->nfp_neigh_off_lock);
- list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ spin_lock_bh(&priv->tun.neigh_off_lock);
+ list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
if (entry->ipv4_addr == ipv4_addr) {
- spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ spin_unlock_bh(&priv->tun.neigh_off_lock);
return;
}
}
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry) {
- spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ spin_unlock_bh(&priv->tun.neigh_off_lock);
nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
return;
}
entry->ipv4_addr = ipv4_addr;
- list_add_tail(&entry->list, &priv->nfp_neigh_off_list);
- spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ list_add_tail(&entry->list, &priv->tun.neigh_off_list);
+ spin_unlock_bh(&priv->tun.neigh_off_lock);
}
static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
@@ -249,8 +253,8 @@ static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
struct nfp_ipv4_route_entry *entry;
struct list_head *ptr, *storage;
- spin_lock_bh(&priv->nfp_neigh_off_lock);
- list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ spin_lock_bh(&priv->tun.neigh_off_lock);
+ list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
if (entry->ipv4_addr == ipv4_addr) {
list_del(&entry->list);
@@ -258,7 +262,7 @@ static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
break;
}
}
- spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ spin_unlock_bh(&priv->tun.neigh_off_lock);
}
static void
@@ -326,7 +330,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
if (!nfp_netdev_is_nfp_repr(n->dev))
return NOTIFY_DONE;
- app_priv = container_of(nb, struct nfp_flower_priv, nfp_tun_neigh_nb);
+ app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
app = app_priv->app;
/* Only concerned with changes to routes already added to NFP. */
@@ -401,11 +405,11 @@ static void nfp_tun_write_ipv4_list(struct nfp_app *app)
int count;
memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
- mutex_lock(&priv->nfp_ipv4_off_lock);
+ mutex_lock(&priv->tun.ipv4_off_lock);
count = 0;
- list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
if (count >= NFP_FL_IPV4_ADDRS_MAX) {
- mutex_unlock(&priv->nfp_ipv4_off_lock);
+ mutex_unlock(&priv->tun.ipv4_off_lock);
nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
return;
}
@@ -413,7 +417,7 @@ static void nfp_tun_write_ipv4_list(struct nfp_app *app)
payload.ipv4_addr[count++] = entry->ipv4_addr;
}
payload.count = cpu_to_be32(count);
- mutex_unlock(&priv->nfp_ipv4_off_lock);
+ mutex_unlock(&priv->tun.ipv4_off_lock);
nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
sizeof(struct nfp_tun_ipv4_addr),
@@ -426,26 +430,26 @@ void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
struct nfp_ipv4_addr_entry *entry;
struct list_head *ptr, *storage;
- mutex_lock(&priv->nfp_ipv4_off_lock);
- list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ mutex_lock(&priv->tun.ipv4_off_lock);
+ list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
if (entry->ipv4_addr == ipv4) {
entry->ref_count++;
- mutex_unlock(&priv->nfp_ipv4_off_lock);
+ mutex_unlock(&priv->tun.ipv4_off_lock);
return;
}
}
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
- mutex_unlock(&priv->nfp_ipv4_off_lock);
+ mutex_unlock(&priv->tun.ipv4_off_lock);
nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
return;
}
entry->ipv4_addr = ipv4;
entry->ref_count = 1;
- list_add_tail(&entry->list, &priv->nfp_ipv4_off_list);
- mutex_unlock(&priv->nfp_ipv4_off_lock);
+ list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
+ mutex_unlock(&priv->tun.ipv4_off_lock);
nfp_tun_write_ipv4_list(app);
}
@@ -456,8 +460,8 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
struct nfp_ipv4_addr_entry *entry;
struct list_head *ptr, *storage;
- mutex_lock(&priv->nfp_ipv4_off_lock);
- list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ mutex_lock(&priv->tun.ipv4_off_lock);
+ list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
if (entry->ipv4_addr == ipv4) {
entry->ref_count--;
@@ -468,191 +472,357 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
break;
}
}
- mutex_unlock(&priv->nfp_ipv4_off_lock);
+ mutex_unlock(&priv->tun.ipv4_off_lock);
nfp_tun_write_ipv4_list(app);
}
-void nfp_tunnel_write_macs(struct nfp_app *app)
+static int
+__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
{
- struct nfp_flower_priv *priv = app->priv;
- struct nfp_tun_mac_offload_entry *entry;
- struct nfp_tun_mac_addr *payload;
- struct list_head *ptr, *storage;
- int mac_count, err, pay_size;
+ struct nfp_tun_mac_addr_offload payload;
- mutex_lock(&priv->nfp_mac_off_lock);
- if (!priv->nfp_mac_off_count) {
- mutex_unlock(&priv->nfp_mac_off_lock);
- return;
- }
+ memset(&payload, 0, sizeof(payload));
- pay_size = sizeof(struct nfp_tun_mac_addr) +
- sizeof(struct index_mac_addr) * priv->nfp_mac_off_count;
+ if (del)
+ payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);
- payload = kzalloc(pay_size, GFP_KERNEL);
- if (!payload) {
- mutex_unlock(&priv->nfp_mac_off_lock);
- return;
- }
+ /* FW supports multiple MACs per cmsg but restrict to single. */
+ payload.count = cpu_to_be16(1);
+ payload.index = cpu_to_be16(idx);
+ ether_addr_copy(payload.addr, mac);
- payload->count = cpu_to_be16(priv->nfp_mac_off_count);
+ return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
+ sizeof(struct nfp_tun_mac_addr_offload),
+ &payload, GFP_KERNEL);
+}
- mac_count = 0;
- list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
- entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
- list);
- payload->addresses[mac_count].index = entry->index;
- ether_addr_copy(payload->addresses[mac_count].addr,
- entry->addr);
- mac_count++;
- }
+static bool nfp_tunnel_port_is_phy_repr(int port)
+{
+ if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
+ NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
+ return true;
- err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
- pay_size, payload, GFP_KERNEL);
+ return false;
+}
- kfree(payload);
+static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
+{
+ return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
+}
- if (err) {
- mutex_unlock(&priv->nfp_mac_off_lock);
- /* Write failed so retain list for future retry. */
- return;
- }
+static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
+{
+ return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
+}
+
+static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
+{
+ return nfp_mac_idx >> 8;
+}
+
+static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
+{
+ return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
+}
+
+static struct nfp_tun_offloaded_mac *
+nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
+ offloaded_macs_params);
+}
+
+static void
+nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
+ struct net_device *netdev, bool mod)
+{
+ if (nfp_netdev_is_nfp_repr(netdev)) {
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_repr *repr;
- /* If list was successfully offloaded, flush it. */
- list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
- entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
- list);
- list_del(&entry->list);
- kfree(entry);
+ repr = netdev_priv(netdev);
+ repr_priv = repr->app_priv;
+
+ /* If modifying MAC, remove repr from old list first. */
+ if (mod)
+ list_del(&repr_priv->mac_list);
+
+ list_add_tail(&repr_priv->mac_list, &entry->repr_list);
}
- priv->nfp_mac_off_count = 0;
- mutex_unlock(&priv->nfp_mac_off_lock);
+ entry->ref_count++;
}
-static int nfp_tun_get_mac_idx(struct nfp_app *app, int ifindex)
+static int
+nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
+ int port, bool mod)
{
struct nfp_flower_priv *priv = app->priv;
- struct nfp_tun_mac_non_nfp_idx *entry;
- struct list_head *ptr, *storage;
- int idx;
-
- mutex_lock(&priv->nfp_mac_index_lock);
- list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
- entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
- if (entry->ifindex == ifindex) {
- idx = entry->index;
- mutex_unlock(&priv->nfp_mac_index_lock);
- return idx;
- }
+ int ida_idx = NFP_MAX_MAC_INDEX, err;
+ struct nfp_tun_offloaded_mac *entry;
+ u16 nfp_mac_idx = 0;
+
+ entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
+ if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
+ nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
+ return 0;
}
- idx = ida_simple_get(&priv->nfp_mac_off_ids, 0,
- NFP_MAX_MAC_INDEX, GFP_KERNEL);
- if (idx < 0) {
- mutex_unlock(&priv->nfp_mac_index_lock);
- return idx;
+ /* Assign a global index if non-repr or MAC address is now shared. */
+ if (entry || !port) {
+ ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
+ NFP_MAX_MAC_INDEX, GFP_KERNEL);
+ if (ida_idx < 0)
+ return ida_idx;
+
+ nfp_mac_idx = nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
+ } else {
+ nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
}
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
- mutex_unlock(&priv->nfp_mac_index_lock);
- return -ENOMEM;
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ err = -ENOMEM;
+ goto err_free_ida;
+ }
+
+ ether_addr_copy(entry->addr, netdev->dev_addr);
+ INIT_LIST_HEAD(&entry->repr_list);
+
+ if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
+ &entry->ht_node,
+ offloaded_macs_params)) {
+ err = -ENOMEM;
+ goto err_free_entry;
+ }
+ }
+
+ err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
+ nfp_mac_idx, false);
+ if (err) {
+ /* If not shared then free. */
+ if (!entry->ref_count)
+ goto err_remove_hash;
+ goto err_free_ida;
}
- entry->ifindex = ifindex;
- entry->index = idx;
- list_add_tail(&entry->list, &priv->nfp_mac_index_list);
- mutex_unlock(&priv->nfp_mac_index_lock);
- return idx;
+ entry->index = nfp_mac_idx;
+ nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
+
+ return 0;
+
+err_remove_hash:
+ rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
+ offloaded_macs_params);
+err_free_entry:
+ kfree(entry);
+err_free_ida:
+ if (ida_idx != NFP_MAX_MAC_INDEX)
+ ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+
+ return err;
}
-static void nfp_tun_del_mac_idx(struct nfp_app *app, int ifindex)
+static int
+nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
+ u8 *mac, bool mod)
{
struct nfp_flower_priv *priv = app->priv;
- struct nfp_tun_mac_non_nfp_idx *entry;
- struct list_head *ptr, *storage;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_tun_offloaded_mac *entry;
+ struct nfp_repr *repr;
+ int ida_idx;
+
+ entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
+ if (!entry)
+ return 0;
+
+ entry->ref_count--;
+ /* If del is part of a mod then mac_list is still in use elsewhere. */
+ if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
+ repr = netdev_priv(netdev);
+ repr_priv = repr->app_priv;
+ list_del(&repr_priv->mac_list);
+ }
- mutex_lock(&priv->nfp_mac_index_lock);
- list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
- entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
- if (entry->ifindex == ifindex) {
- ida_simple_remove(&priv->nfp_mac_off_ids,
- entry->index);
- list_del(&entry->list);
- kfree(entry);
- break;
+ /* If MAC is now used by 1 repr, set the offloaded MAC index to port. */
+ if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
+ u16 nfp_mac_idx;
+ int port, err;
+
+ repr_priv = list_first_entry(&entry->repr_list,
+ struct nfp_flower_repr_priv,
+ mac_list);
+ repr = repr_priv->nfp_repr;
+ port = nfp_repr_get_port_id(repr->netdev);
+ nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
+ err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
+ if (err) {
+ nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
+ netdev_name(netdev));
+ return 0;
}
+
+ ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
+ ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+ entry->index = nfp_mac_idx;
+ return 0;
}
- mutex_unlock(&priv->nfp_mac_index_lock);
-}
-static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
- struct nfp_app *app)
-{
- struct nfp_flower_priv *priv = app->priv;
- struct nfp_tun_mac_offload_entry *entry;
- u16 nfp_mac_idx;
- int port = 0;
+ if (entry->ref_count)
+ return 0;
- /* Check if MAC should be offloaded. */
- if (!is_valid_ether_addr(netdev->dev_addr))
- return;
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
+ &entry->ht_node,
+ offloaded_macs_params));
+ /* If MAC has global ID then extract and free the ida entry. */
+ if (nfp_tunnel_is_mac_idx_global(entry->index)) {
+ ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
+ ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+ }
- if (nfp_netdev_is_nfp_repr(netdev))
+ kfree(entry);
+
+ return __nfp_tunnel_offload_mac(app, mac, 0, true);
+}
+
+static int
+nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
+ enum nfp_flower_mac_offload_cmd cmd)
+{
+ struct nfp_flower_non_repr_priv *nr_priv = NULL;
+ bool non_repr = false, *mac_offloaded;
+ u8 *off_mac = NULL;
+ int err, port = 0;
+
+ if (nfp_netdev_is_nfp_repr(netdev)) {
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_repr *repr;
+
+ repr = netdev_priv(netdev);
+ if (repr->app != app)
+ return 0;
+
+ repr_priv = repr->app_priv;
+ mac_offloaded = &repr_priv->mac_offloaded;
+ off_mac = &repr_priv->offloaded_mac_addr[0];
port = nfp_repr_get_port_id(netdev);
- else if (!nfp_fl_is_netdev_to_offload(netdev))
- return;
+ if (!nfp_tunnel_port_is_phy_repr(port))
+ return 0;
+ } else if (nfp_fl_is_netdev_to_offload(netdev)) {
+ nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
+ if (!nr_priv)
+ return -ENOMEM;
+
+ mac_offloaded = &nr_priv->mac_offloaded;
+ off_mac = &nr_priv->offloaded_mac_addr[0];
+ non_repr = true;
+ } else {
+ return 0;
+ }
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- nfp_flower_cmsg_warn(app, "Mem fail when offloading MAC.\n");
- return;
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ err = -EINVAL;
+ goto err_put_non_repr_priv;
}
- if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
- NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT) {
- nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
- } else if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
- NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT) {
- port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port);
- nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT;
- } else {
- /* Must assign our own unique 8-bit index. */
- int idx = nfp_tun_get_mac_idx(app, netdev->ifindex);
+ if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
+ cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;
- if (idx < 0) {
- nfp_flower_cmsg_warn(app, "Can't assign non-repr MAC index.\n");
- kfree(entry);
- return;
- }
- nfp_mac_idx = idx << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
+ switch (cmd) {
+ case NFP_TUNNEL_MAC_OFFLOAD_ADD:
+ err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
+ if (err)
+ goto err_put_non_repr_priv;
+
+ if (non_repr)
+ __nfp_flower_non_repr_priv_get(nr_priv);
+
+ *mac_offloaded = true;
+ ether_addr_copy(off_mac, netdev->dev_addr);
+ break;
+ case NFP_TUNNEL_MAC_OFFLOAD_DEL:
+ /* Only attempt delete if add was successful. */
+ if (!*mac_offloaded)
+ break;
+
+ if (non_repr)
+ __nfp_flower_non_repr_priv_put(nr_priv);
+
+ *mac_offloaded = false;
+
+ err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
+ false);
+ if (err)
+ goto err_put_non_repr_priv;
+
+ break;
+ case NFP_TUNNEL_MAC_OFFLOAD_MOD:
+ /* Ignore if changing to the same address. */
+ if (ether_addr_equal(netdev->dev_addr, off_mac))
+ break;
+
+ err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
+ if (err)
+ goto err_put_non_repr_priv;
+
+ /* Delete the previous MAC address. */
+ err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
+ if (err)
+ nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
+ netdev_name(netdev));
+
+ ether_addr_copy(off_mac, netdev->dev_addr);
+ break;
+ default:
+ err = -EINVAL;
+ goto err_put_non_repr_priv;
}
- entry->index = cpu_to_be16(nfp_mac_idx);
- ether_addr_copy(entry->addr, netdev->dev_addr);
+ if (non_repr)
+ __nfp_flower_non_repr_priv_put(nr_priv);
+
+ return 0;
+
+err_put_non_repr_priv:
+ if (non_repr)
+ __nfp_flower_non_repr_priv_put(nr_priv);
- mutex_lock(&priv->nfp_mac_off_lock);
- priv->nfp_mac_off_count++;
- list_add_tail(&entry->list, &priv->nfp_mac_off_list);
- mutex_unlock(&priv->nfp_mac_off_lock);
+ return err;
}
int nfp_tunnel_mac_event_handler(struct nfp_app *app,
struct net_device *netdev,
unsigned long event, void *ptr)
{
- if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
- /* If non-nfp netdev then free its offload index. */
- if (nfp_fl_is_netdev_to_offload(netdev))
- nfp_tun_del_mac_idx(app, netdev->ifindex);
- } else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
- event == NETDEV_REGISTER) {
- nfp_tun_add_to_mac_offload_list(netdev, app);
-
- /* Force a list write to keep NFP up to date. */
- nfp_tunnel_write_macs(app);
+ int err;
+
+ if (event == NETDEV_DOWN) {
+ err = nfp_tunnel_offload_mac(app, netdev,
+ NFP_TUNNEL_MAC_OFFLOAD_DEL);
+ if (err)
+ nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n",
+ netdev_name(netdev));
+ } else if (event == NETDEV_UP) {
+ err = nfp_tunnel_offload_mac(app, netdev,
+ NFP_TUNNEL_MAC_OFFLOAD_ADD);
+ if (err)
+ nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
+ netdev_name(netdev));
+ } else if (event == NETDEV_CHANGEADDR) {
+ /* Only offload addr change if netdev is already up. */
+ if (!(netdev->flags & IFF_UP))
+ return NOTIFY_OK;
+
+ err = nfp_tunnel_offload_mac(app, netdev,
+ NFP_TUNNEL_MAC_OFFLOAD_MOD);
+ if (err)
+ nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
+ netdev_name(netdev));
}
return NOTIFY_OK;
}
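
A note on the handler above: it collapses the netdev notifier events into three MAC-offload commands, and a NETDEV_CHANGEADDR is only acted upon while the interface is up. The following stand-alone sketch (plain user-space C with hypothetical enum names, not the kernel code) models that mapping:

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins for the kernel's notifier events and commands. */
enum ev { EV_UP, EV_DOWN, EV_CHANGEADDR };
enum cmd { CMD_NONE, CMD_ADD, CMD_DEL, CMD_MOD };

/* Models nfp_tunnel_mac_event_handler(): CHANGEADDR is only offloaded
 * when the netdev is already up.
 */
static enum cmd map_event(enum ev e, bool if_up)
{
	switch (e) {
	case EV_UP:
		return CMD_ADD;
	case EV_DOWN:
		return CMD_DEL;
	case EV_CHANGEADDR:
		return if_up ? CMD_MOD : CMD_NONE;
	}
	return CMD_NONE;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       map_event(EV_UP, true),            /* ADD */
	       map_event(EV_DOWN, true),          /* DEL */
	       map_event(EV_CHANGEADDR, true),    /* MOD */
	       map_event(EV_CHANGEADDR, false));  /* none */
	return 0;
}
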
@@ -660,68 +830,62 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app,
int nfp_tunnel_config_start(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
+ int err;
+
+ /* Initialise rhash for MAC offload tracking. */
+ err = rhashtable_init(&priv->tun.offloaded_macs,
+ &offloaded_macs_params);
+ if (err)
+ return err;
- /* Initialise priv data for MAC offloading. */
- priv->nfp_mac_off_count = 0;
- mutex_init(&priv->nfp_mac_off_lock);
- INIT_LIST_HEAD(&priv->nfp_mac_off_list);
- mutex_init(&priv->nfp_mac_index_lock);
- INIT_LIST_HEAD(&priv->nfp_mac_index_list);
- ida_init(&priv->nfp_mac_off_ids);
+ ida_init(&priv->tun.mac_off_ids);
/* Initialise priv data for IPv4 offloading. */
- mutex_init(&priv->nfp_ipv4_off_lock);
- INIT_LIST_HEAD(&priv->nfp_ipv4_off_list);
+ mutex_init(&priv->tun.ipv4_off_lock);
+ INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
/* Initialise priv data for neighbour offloading. */
- spin_lock_init(&priv->nfp_neigh_off_lock);
- INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
- priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
+ spin_lock_init(&priv->tun.neigh_off_lock);
+ INIT_LIST_HEAD(&priv->tun.neigh_off_list);
+ priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
+
+ err = register_netevent_notifier(&priv->tun.neigh_nb);
+ if (err) {
+ rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
+ nfp_check_rhashtable_empty, NULL);
+ return err;
+ }
- return register_netevent_notifier(&priv->nfp_tun_neigh_nb);
+ return 0;
}
void nfp_tunnel_config_stop(struct nfp_app *app)
{
- struct nfp_tun_mac_offload_entry *mac_entry;
struct nfp_flower_priv *priv = app->priv;
struct nfp_ipv4_route_entry *route_entry;
- struct nfp_tun_mac_non_nfp_idx *mac_idx;
struct nfp_ipv4_addr_entry *ip_entry;
struct list_head *ptr, *storage;
- unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);
-
- /* Free any memory that may be occupied by MAC list. */
- list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
- mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
- list);
- list_del(&mac_entry->list);
- kfree(mac_entry);
- }
-
- /* Free any memory that may be occupied by MAC index list. */
- list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
- mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx,
- list);
- list_del(&mac_idx->list);
- kfree(mac_idx);
- }
+ unregister_netevent_notifier(&priv->tun.neigh_nb);
- ida_destroy(&priv->nfp_mac_off_ids);
+ ida_destroy(&priv->tun.mac_off_ids);
/* Free any memory that may be occupied by ipv4 list. */
- list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
list_del(&ip_entry->list);
kfree(ip_entry);
}
/* Free any memory that may be occupied by the route list. */
- list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
list);
list_del(&route_entry->list);
kfree(route_entry);
}
+
+ /* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
+ rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
+ nfp_check_rhashtable_empty, NULL);
}
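
The start/stop pairing above is order-sensitive: the offloaded-MACs table must exist before the netevent notifier can fire, and it is destroyed only after the notifier is unregistered. A minimal user-space model of that bracketing and of the unwind on registration failure (hypothetical names, no kernel APIs):

#include <stdio.h>

static int table_ready;

static int table_init(void) { table_ready = 1; return 0; }
static void table_destroy(void) { table_ready = 0; }

/* The notifier must never observe a missing table. */
static int notifier_register(void) { return table_ready ? 0 : -1; }
static void notifier_unregister(void) { }

static int config_start(void)
{
	int err = table_init();

	if (err)
		return err;
	err = notifier_register();
	if (err)
		table_destroy();	/* unwind on failure, as in the patch */
	return err;
}

static void config_stop(void)
{
	notifier_unregister();		/* stop new events first */
	table_destroy();		/* then it is safe to free the table */
}

int main(void)
{
	if (!config_start())
		config_stop();
	puts("ok");
	return 0;
}
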
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 24a90163775e..3b0955d34716 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -554,7 +554,6 @@ struct qed_hwfn {
u8 dp_level;
char name[NAME_SIZE];
- bool first_on_engine;
bool hw_init_done;
u8 num_funcs_on_engine;
@@ -805,6 +804,9 @@ struct qed_dev {
u32 mcp_nvm_resp;
+ /* Recovery */
+ bool recov_in_prog;
+
/* Linux specific here */
struct qede_dev *edev;
struct pci_dev *pdev;
@@ -944,6 +946,7 @@ void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
+void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn);
void qed_get_protocol_stats(struct qed_dev *cdev,
enum qed_mcp_protocol_type type,
union qed_mcp_protocol_stats *stats);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index c2ad405b2f50..35c9f484eb9f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -2129,6 +2129,7 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
rdma_tasks);
/* no need for break since RoCE coexist with Ethernet */
}
+ /* fall through */
case QED_PCI_ETH:
{
struct qed_eth_pf_params *p_params =
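
The bare "/* fall through */" comment added above documents a deliberate case fall-through, which keeps GCC's -Wimplicit-fallthrough warning quiet. A trivial stand-alone demonstration of the same annotation (a toy classifier, not qed code):

#include <stdio.h>

/* Deliberate fall-through, annotated as in the patch so that GCC's
 * -Wimplicit-fallthrough accepts it.
 */
static int features_for(int personality)
{
	int features = 0;

	switch (personality) {
	case 2:			/* RDMA-capable personality */
		features |= 0x2;
		/* fall through */
	case 1:			/* plain Ethernet */
		features |= 0x1;
		break;
	default:
		break;
	}
	return features;
}

int main(void)
{
	printf("%d %d\n", features_for(1), features_for(2)); /* 1 3 */
	return 0;
}
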
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 2ecaaaa4469a..e2cbd77646a2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1959,11 +1959,6 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
(p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
- /* Cleanup chip from previous driver if such remains exist */
- rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
- if (rc)
- return rc;
-
/* Sanity check before the PF init sequence that uses DMAE */
rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
if (rc)
@@ -2007,17 +2002,15 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
return rc;
}
-static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 enable)
+int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool b_enable)
{
- u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
+ u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0;
- /* Change PF in PXP */
- qed_wr(p_hwfn, p_ptt,
- PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
+ /* Configure the PF's internal FID_enable for master transactions */
+ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
- /* wait until value is set - try for 1 second every 50us */
+ /* Wait until value is set - try for 1 second every 50us */
for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
val = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
@@ -2071,13 +2064,19 @@ static int qed_vf_start(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_pglueb_clear_err(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
+ BIT(p_hwfn->abs_pf_id));
+}
+
int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
{
struct qed_load_req_params load_req_params;
u32 load_code, resp, param, drv_mb_param;
bool b_default_mtu = true;
struct qed_hwfn *p_hwfn;
- int rc = 0, mfw_rc, i;
+ int rc = 0, i;
u16 ether_type;
if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
@@ -2092,7 +2091,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
}
for_each_hwfn(cdev, i) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ p_hwfn = &cdev->hwfns[i];
/* If management didn't provide a default, set one of our own */
if (!p_hwfn->hw_info.mtu) {
@@ -2105,9 +2104,6 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
continue;
}
- /* Enable DMAE in PXP */
- rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
-
rc = qed_calc_hw_mode(p_hwfn);
if (rc)
return rc;
@@ -2144,12 +2140,43 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
"Load request was sent. Load code: 0x%x\n",
load_code);
+ /* Only relevant for recovery:
+ * Clear the indication after the MFW has responded to the LOAD_REQ.
+ */
+ cdev->recov_in_prog = false;
+
qed_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt);
qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
- p_hwfn->first_on_engine = (load_code ==
- FW_MSG_CODE_DRV_LOAD_ENGINE);
+ /* Clean up chip from previous driver if such remains exist.
+ * This is not needed when the PF is the first one on the
+ * engine, since afterwards we are going to init the FW.
+ */
+ if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) {
+ rc = qed_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->rel_pf_id, false);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Final cleanup failed\n");
+ goto load_err;
+ }
+ }
+
+ /* Log and clear previous pglue_b errors if such exist */
+ qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
+
+ /* Enable the PF's internal FID_enable in the PXP */
+ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
+ true);
+ if (rc)
+ goto load_err;
+
+ /* Clear the pglue_b was_error indication.
+ * In E4 it must be done after the BME and the internal
+ * FID_enable for the PF are set, since VDMs may cause the
+ * indication to be set again.
+ */
+ qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
@@ -2180,39 +2207,29 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
break;
}
- if (rc)
+ if (rc) {
DP_NOTICE(p_hwfn,
"init phase failed for loadcode 0x%x (rc %d)\n",
- load_code, rc);
+ load_code, rc);
+ goto load_err;
+ }
- /* ACK mfw regardless of success or failure of initialization */
- mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_LOAD_DONE,
- 0, &load_code, &param);
+ rc = qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
if (rc)
return rc;
- if (mfw_rc) {
- DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
- return mfw_rc;
- }
-
- /* Check if there is a DID mismatch between nvm-cfg/efuse */
- if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
- DP_NOTICE(p_hwfn,
- "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
/* send DCBX attention request command */
DP_VERBOSE(p_hwfn,
QED_MSG_DCB,
"sending phony dcbx set command to trigger DCBx attention handling\n");
- mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_SET_DCBX,
- 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
- &load_code, &param);
- if (mfw_rc) {
+ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
+ &resp, &param);
+ if (rc) {
DP_NOTICE(p_hwfn,
"Failed to send DCBX attention request\n");
- return mfw_rc;
+ return rc;
}
p_hwfn->hw_init_done = true;
@@ -2261,6 +2278,12 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
}
return 0;
+
+load_err:
+ /* The MFW load lock should also be released when initialization fails.
+ */
+ qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
+ return rc;
}
#define QED_HW_STOP_RETRY_LIMIT (10)
@@ -2273,6 +2296,9 @@ static void qed_hw_timers_stop(struct qed_dev *cdev,
qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+ if (cdev->recov_in_prog)
+ return;
+
for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
if ((!qed_rd(p_hwfn, p_ptt,
TM_REG_PF_SCAN_ACTIVE_CONN)) &&
@@ -2335,12 +2361,14 @@ int qed_hw_stop(struct qed_dev *cdev)
p_hwfn->hw_init_done = false;
/* Send unload command to MCP */
- rc = qed_mcp_unload_req(p_hwfn, p_ptt);
- if (rc) {
- DP_NOTICE(p_hwfn,
- "Failed sending a UNLOAD_REQ command. rc = %d.\n",
- rc);
- rc2 = -EINVAL;
+ if (!cdev->recov_in_prog) {
+ rc = qed_mcp_unload_req(p_hwfn, p_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed sending a UNLOAD_REQ command. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
}
qed_slowpath_irq_sync(p_hwfn);
@@ -2382,27 +2410,31 @@ int qed_hw_stop(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
- qed_mcp_unload_done(p_hwfn, p_ptt);
- if (rc) {
- DP_NOTICE(p_hwfn,
- "Failed sending a UNLOAD_DONE command. rc = %d.\n",
- rc);
- rc2 = -EINVAL;
+ if (!cdev->recov_in_prog) {
+ rc = qed_mcp_unload_done(p_hwfn, p_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed sending a UNLOAD_DONE command. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
}
}
- if (IS_PF(cdev)) {
+ if (IS_PF(cdev) && !cdev->recov_in_prog) {
p_hwfn = QED_LEADING_HWFN(cdev);
p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt;
- /* Disable DMAE in PXP - in CMT, this should only be done for
- * first hw-function, and only after all transactions have
- * stopped for all active hw-functions.
+ /* Clear the PF's internal FID_enable in the PXP.
+ * In CMT this should only be done for first hw-function, and
+ * only after all transactions have stopped for all active
+ * hw-functions.
*/
- rc = qed_change_pci_hwfn(p_hwfn, p_ptt, false);
+ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
if (rc) {
DP_NOTICE(p_hwfn,
- "qed_change_pci_hwfn failed. rc = %d.\n", rc);
+ "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
+ rc);
rc2 = -EINVAL;
}
}
@@ -2502,9 +2534,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
}
- /* Clean Previous errors if such exist */
- qed_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
+ /* Clean previous pglue_b errors if such exist */
+ qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
/* enable internal target-read */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -3440,6 +3471,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
void __iomem *p_doorbells,
enum qed_pci_personality personality)
{
+ struct qed_dev *cdev = p_hwfn->cdev;
int rc = 0;
/* Split PCI bars evenly between hwfns */
@@ -3492,7 +3524,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
/* Sending a mailbox to the MFW should be done after qed_get_hw_info()
* is called as it sets the ports number in an engine.
*/
- if (IS_LEAD_HWFN(p_hwfn)) {
+ if (IS_LEAD_HWFN(p_hwfn) && !cdev->recov_in_prog) {
rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
if (rc)
DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n");
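
The reworked load path above is strict about ordering: final cleanup runs only when another PF already initialised the engine, pglue_b errors are logged and cleared before FID_enable is set, and a failure after LOAD_REQ still sends LOAD_DONE so the MFW load lock is released. A compact user-space model of that goto-based unwinding (hypothetical step names, all steps stubbed):

#include <stdio.h>

enum { LOAD_ENGINE = 0, LOAD_PORT = 1 };

static int final_cleanup(void) { return 0; }
static int set_fid_enable(void) { return 0; }
static int init_fw(void) { return 0; }
static int load_done(void) { puts("LOAD_DONE sent"); return 0; }

/* Models qed_hw_init(): LOAD_DONE must be sent on the error path too,
 * otherwise the MFW load lock would be held forever.
 */
static int hw_init(int load_code)
{
	int rc;

	if (load_code != LOAD_ENGINE) {	/* engine already initialised */
		rc = final_cleanup();
		if (rc)
			goto load_err;
	}

	rc = set_fid_enable();
	if (rc)
		goto load_err;

	rc = init_fw();
	if (rc)
		goto load_err;

	return load_done();

load_err:
	load_done();			/* release the MFW load lock */
	return rc;
}

int main(void)
{
	return hw_init(LOAD_PORT);
}
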
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index acccd85170aa..e4b4e3b78e8a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -473,6 +473,18 @@ int
qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle);
/**
+ * @brief qed_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_enable - true/false
+ *
+ * @return int
+ */
+int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool b_enable);
+
+/**
* @brief db_recovery_add - add doorbell information to the doorbell
* recovery mechanism.
*
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index b13cfb449d8f..417121e74ee9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12827,7 +12827,7 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_LLDP_DATA_UPDATED,
MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
- MFW_DRV_MSG_RESERVED4,
+ MFW_DRV_MSG_ERROR_RECOVERY,
MFW_DRV_MSG_BW_UPDATE,
MFW_DRV_MSG_S_TAG_UPDATE,
MFW_DRV_MSG_GET_LAN_STATS,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 70504dcf4087..72ec1c6bdf70 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -703,6 +703,17 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
int qed_status = 0;
u32 offset = 0;
+ if (p_hwfn->cdev->recov_in_prog) {
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n",
+ src_addr, src_type, dst_addr, dst_type,
+ size_in_dwords);
+
+ /* Let the flow complete w/o any error handling */
+ return 0;
+ }
+
qed_dmae_opcode(p_hwfn,
(src_type == QED_DMAE_ADDRESS_GRC),
(dst_type == QED_DMAE_ADDRESS_GRC),
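
The guard added above is the recurring pattern of this series: while recov_in_prog is set, hardware-touching paths return success without doing anything, so in-flight flows wind down quietly instead of erroring out. A stand-alone sketch of the pattern (simplified, not the driver code):

#include <stdio.h>
#include <stdbool.h>

static bool recov_in_prog;

/* Models qed_dmae_execute_command(): during recovery the transaction is
 * skipped but reported as successful, so callers complete normally.
 */
static int dmae_execute(const char *what)
{
	if (recov_in_prog) {
		printf("recovery: skipping DMAE %s\n", what);
		return 0;	/* let the flow complete w/o error handling */
	}
	printf("DMAE %s executed\n", what);
	return 0;
}

int main(void)
{
	dmae_execute("copy A");
	recov_in_prog = true;
	dmae_execute("copy B");	/* skipped, still returns 0 */
	return 0;
}
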
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 92340919d852..e23980e301b6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -255,112 +255,114 @@ out:
#define PGLUE_ATTENTION_ICPL_VALID (1 << 23)
#define PGLUE_ATTENTION_ZLR_VALID (1 << 25)
#define PGLUE_ATTENTION_ILT_VALID (1 << 23)
-static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn)
+
+int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
{
u32 tmp;
- tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_TX_ERR_WR_DETAILS2);
+ tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
if (tmp & PGLUE_ATTENTION_VALID) {
u32 addr_lo, addr_hi, details;
- addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
- addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
- details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ details = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_DETAILS);
- DP_INFO(p_hwfn,
- "Illegal write by chip to [%08x:%08x] blocked.\n"
- "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
- "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
- addr_hi, addr_lo, details,
- (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
- (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
- GET_FIELD(details,
- PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
- tmp,
- GET_FIELD(tmp,
- PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
- GET_FIELD(tmp,
- PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
- GET_FIELD(tmp,
- PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
+ DP_NOTICE(p_hwfn,
+ "Illegal write by chip to [%08x:%08x] blocked.\n"
+ "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
+ "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
+ GET_FIELD(details,
+ PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
+ tmp,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
}
- tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_TX_ERR_RD_DETAILS2);
+ tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
if (tmp & PGLUE_ATTENTION_RD_VALID) {
u32 addr_lo, addr_hi, details;
- addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
- addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
- details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ details = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_RD_DETAILS);
- DP_INFO(p_hwfn,
- "Illegal read by chip from [%08x:%08x] blocked.\n"
- " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
- " Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
- addr_hi, addr_lo, details,
- (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
- (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
- GET_FIELD(details,
- PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
- tmp,
- GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
- : 0,
- GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
- GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
- : 0);
+ DP_NOTICE(p_hwfn,
+ "Illegal read by chip from [%08x:%08x] blocked.\n"
+ "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
+ "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
+ GET_FIELD(details,
+ PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
+ tmp,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
}
- tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
+ tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
if (tmp & PGLUE_ATTENTION_ICPL_VALID)
- DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);
+ DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp);
- tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
+ tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
u32 addr_hi, addr_lo;
- addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
- addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
- DP_INFO(p_hwfn, "ZLR eror - %08x [Address %08x:%08x]\n",
- tmp, addr_hi, addr_lo);
+ DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
+ tmp, addr_hi, addr_lo);
}
- tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
+ tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
if (tmp & PGLUE_ATTENTION_ILT_VALID) {
u32 addr_hi, addr_lo, details;
- addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
- addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
- details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ details = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_VF_ILT_ERR_DETAILS);
- DP_INFO(p_hwfn,
- "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
- details, tmp, addr_hi, addr_lo);
+ DP_NOTICE(p_hwfn,
+ "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
+ details, tmp, addr_hi, addr_lo);
}
/* Clear the indications */
- qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
+ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2));
return 0;
}
+static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
+}
+
#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff)
#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
#define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0)
@@ -540,7 +542,7 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
{"PGLUE misc_flr", ATTENTION_SINGLE,
NULL, MAX_BLOCK_ID},
{"PGLUE B RBC", ATTENTION_PAR_INT,
- qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B},
+ qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B},
{"PGLUE misc_mctp", ATTENTION_SINGLE,
NULL, MAX_BLOCK_ID},
{"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index d81a62ebd524..1f356ed4f761 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -431,4 +431,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
#define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev))
+int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 6adf5bda9811..b47352643fb5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -359,6 +359,8 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
qed_init_dp(cdev, params->dp_module, params->dp_level);
+ cdev->recov_in_prog = params->recov_in_prog;
+
rc = qed_init_pci(cdev, pdev);
if (rc) {
DP_ERR(cdev, "init pci failed\n");
@@ -2203,6 +2205,15 @@ static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}
+void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
+{
+ struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
+ void *cookie = p_hwfn->cdev->ops_cookie;
+
+ if (ops && ops->schedule_recovery_handler)
+ ops->schedule_recovery_handler(cookie);
+}
+
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
void *handle)
{
@@ -2226,6 +2237,23 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
return status;
}
+static int qed_recovery_process(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+
+ rc = qed_start_recovery_process(p_hwfn, p_ptt);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
@@ -2380,6 +2408,8 @@ const struct qed_common_ops qed_common_ops_pass = {
.nvm_get_image = &qed_nvm_get_image,
.set_coalesce = &qed_set_coalesce,
.set_led = &qed_set_led,
+ .recovery_process = &qed_recovery_process,
+ .recovery_prolog = &qed_recovery_prolog,
.update_drv_state = &qed_update_drv_state,
.update_mac = &qed_update_mac,
.update_mtu = &qed_update_mtu,
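
qed_schedule_recovery_handler() above is the core-to-protocol bridge: qede registers a callback in the common ops and the core invokes it with an opaque cookie, tolerating an unset callback. A self-contained model of that indirection (hypothetical types, user-space C):

#include <stdio.h>

struct common_cb_ops {
	void (*schedule_recovery_handler)(void *cookie);
};

struct core_dev {
	const struct common_cb_ops *ops;
	void *cookie;
};

/* Models qed_schedule_recovery_handler(): tolerate unset ops/callback. */
static void schedule_recovery(struct core_dev *cdev)
{
	if (cdev->ops && cdev->ops->schedule_recovery_handler)
		cdev->ops->schedule_recovery_handler(cdev->cookie);
}

static void qede_cb(void *cookie)
{
	printf("recovery scheduled for %s\n", (const char *)cookie);
}

static const struct common_cb_ops ops = {
	.schedule_recovery_handler = qede_cb,
};

int main(void)
{
	struct core_dev cdev = { .ops = &ops, .cookie = "eth0" };

	schedule_recovery(&cdev);
	return 0;
}
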
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index e7f18e34ff0d..bb8541847aa5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1070,6 +1070,27 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
+ &param);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send a LOAD_DONE command, rc = %d\n", rc);
+ return rc;
+ }
+
+ /* Check if there is a DID mismatch between nvm-cfg/efuse */
+ if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
+ DP_NOTICE(p_hwfn,
+ "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
+
+ return 0;
+}
+
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_mb_params mb_params;
@@ -1528,6 +1549,60 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
return 0;
}
+u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PATH);
+ path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr);
+ path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn));
+
+ proc_kill_cnt = qed_rd(p_hwfn, p_ptt,
+ path_addr +
+ offsetof(struct public_path, process_kill)) &
+ PROCESS_KILL_COUNTER_MASK;
+
+ return proc_kill_cnt;
+}
+
+static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 proc_kill_cnt;
+
+ /* Prevent possible attentions/interrupts during the recovery handling
+ * and until its load phase, during which they will be re-enabled.
+ */
+ qed_int_igu_disable_int(p_hwfn, p_ptt);
+
+ DP_NOTICE(p_hwfn, "Received a process kill indication\n");
+
+ /* The following operations should be done once, and thus in CMT mode
+ * are carried out by only the first HW function.
+ */
+ if (p_hwfn != QED_LEADING_HWFN(cdev))
+ return;
+
+ if (cdev->recov_in_prog) {
+ DP_NOTICE(p_hwfn,
+ "Ignoring the indication since a recovery process is already in progress\n");
+ return;
+ }
+
+ cdev->recov_in_prog = true;
+
+ proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt);
+ DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt);
+
+ qed_schedule_recovery_handler(p_hwfn);
+}
+
static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum MFW_DRV_MSG_TYPE type)
@@ -1758,6 +1833,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_ERROR_RECOVERY:
+ qed_mcp_handle_process_kill(p_hwfn, p_ptt);
+ break;
case MFW_DRV_MSG_GET_LAN_STATS:
case MFW_DRV_MSG_GET_FCOE_STATS:
case MFW_DRV_MSG_GET_ISCSI_STATS:
@@ -2303,6 +2381,43 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+
+ if (cdev->recov_in_prog) {
+ DP_NOTICE(p_hwfn,
+ "Avoid triggering a recovery since such a process is already in progress\n");
+ return -EAGAIN;
+ }
+
+ DP_NOTICE(p_hwfn, "Triggering a recovery process\n");
+ qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
+
+ return 0;
+}
+
+#define QED_RECOVERY_PROLOG_SLEEP_MS 100
+
+int qed_recovery_prolog(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+ int rc;
+
+ /* Allow ongoing PCIe transactions to complete */
+ msleep(QED_RECOVERY_PROLOG_SLEEP_MS);
+
+ /* Clear the PF's internal FID_enable in the PXP */
+ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
+ rc);
+
+ return rc;
+}
+
static int
qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 vf_id, u8 num)
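
The process-kill path above makes the recovery trigger idempotent: interrupts are masked first, only the leading function acts in CMT mode, and a second indication while recov_in_prog is already set is ignored. The same logic modeled in isolation (simplified, no hardware access):

#include <stdio.h>
#include <stdbool.h>

static bool recov_in_prog;

/* Models qed_mcp_handle_process_kill(): the first indication wins. */
static void handle_process_kill(bool is_leading)
{
	/* interrupts would be masked here, before anything else */
	if (!is_leading)
		return;		/* CMT: only the leading hwfn acts */

	if (recov_in_prog) {
		puts("ignoring: recovery already in progress");
		return;
	}
	recov_in_prog = true;
	puts("scheduling recovery handler");
}

int main(void)
{
	handle_process_kill(false);	/* second hwfn: no-op */
	handle_process_kill(true);	/* schedules recovery */
	handle_process_kill(true);	/* ignored */
	return 0;
}
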
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index eddf67798d6f..6e1d72a669ae 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -441,6 +441,38 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_mcp_drv_version *p_ver);
/**
+ * @brief Read the MFW process kill counter
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Trigger a recovery process
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief A recovery handler must call this function as its first step.
+ * It is assumed that the handler is not run from an interrupt context.
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_recovery_prolog(struct qed_dev *cdev);
+
+/**
* @brief Notify MFW about the change in base device properties
*
* @param p_hwfn
@@ -801,6 +833,16 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
struct qed_load_req_params *p_params);
/**
+ * @brief Sends a LOAD_DONE message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int - 0 - Operation was successful.
+ */
+int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
* @brief Sends a UNLOAD_REQ message to the MFW
*
* @param p_hwfn
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 8939ed6e08b7..5ce825ca5f24 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -518,6 +518,8 @@
0x180824UL
#define MISC_REG_AEU_GENERAL_ATTN_0 \
0x008400UL
+#define MISC_REG_AEU_GENERAL_ATTN_35 \
+ 0x00848cUL
#define CAU_REG_SB_ADDR_MEMORY \
0x1c8000UL
#define CAU_REG_SB_VAR_MEMORY \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index eb88bbc6b193..3e0f7c46bb1b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -790,6 +790,17 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
+static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent,
+ u8 *fw_return_code)
+{
+ if (!fw_return_code)
+ return;
+
+ if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE ||
+ p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP)
+ *fw_return_code = RDMA_RETURN_OK;
+}
+
/* Avoid overriding of SPQ entries when getting out-of-order completions, by
* marking the completions in a bitmap and increasing the chain consumer only
* for the first successive completed entries.
@@ -825,6 +836,17 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
+ if (p_hwfn->cdev->recov_in_prog) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Recovery is in progress. Skip spq post [cmd %02x protocol %02x]\n",
+ p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
+
+ /* Let the flow complete w/o any error handling */
+ qed_spq_recov_set_ret_code(p_ent, fw_return_code);
+ return 0;
+ }
+
/* Complete the entry */
rc = qed_spq_fill_entry(p_hwfn, p_ent);
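
The SPQ guard follows the same skip-quietly rule as the DMAE one, with one twist: RDMA callers inspect fw_return_code, so the stub fills in a success value for those protocols before returning. A small model of that behaviour (stand-in constants, not the driver's definitions):

#include <stdio.h>
#include <stdbool.h>

enum proto { PROTO_ETH, PROTO_ROCE, PROTO_IWARP };
#define RDMA_OK 0	/* stand-in for RDMA_RETURN_OK */

static bool recov_in_prog;

/* Models qed_spq_post() during recovery: skip the post, but give RDMA
 * callers the return code they expect so their flows complete cleanly.
 */
static int spq_post(enum proto p, unsigned char *fw_ret)
{
	if (recov_in_prog) {
		if (fw_ret && (p == PROTO_ROCE || p == PROTO_IWARP))
			*fw_ret = RDMA_OK;
		return 0;
	}
	/* normal ramrod posting would happen here */
	return 0;
}

int main(void)
{
	unsigned char ret = 0xff;

	recov_in_prog = true;
	spq_post(PROTO_ROCE, &ret);
	printf("fw_return_code = %u\n", ret);
	return 0;
}
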
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 71a7af134dd8..9faaa6df78ed 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -4449,6 +4449,13 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
pci_disable_sriov(cdev->pdev);
+ if (cdev->recov_in_prog) {
+ DP_VERBOSE(cdev,
+ QED_MSG_IOV,
+ "Skip SRIOV disable operations in the device since a recovery is in progress\n");
+ goto out;
+ }
+
for_each_hwfn(cdev, i) {
struct qed_hwfn *hwfn = &cdev->hwfns[i];
struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
@@ -4488,7 +4495,7 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
qed_ptt_release(hwfn, ptt);
}
-
+out:
qed_iov_set_vfs_to_disable(cdev, false);
return 0;
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 613249d1e967..843416404aeb 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -162,6 +162,7 @@ struct qede_rdma_dev {
struct list_head entry;
struct list_head rdma_event_list;
struct workqueue_struct *rdma_wq;
+ bool exp_recovery;
};
struct qede_ptp;
@@ -264,6 +265,7 @@ struct qede_dev {
enum QEDE_STATE {
QEDE_STATE_CLOSED,
QEDE_STATE_OPEN,
+ QEDE_STATE_RECOVERY,
};
#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
@@ -462,6 +464,7 @@ struct qede_fastpath {
#define QEDE_CSUM_UNNECESSARY BIT(1)
#define QEDE_TUNN_CSUM_UNNECESSARY BIT(2)
+#define QEDE_SP_RECOVERY 0
#define QEDE_SP_RX_MODE 1
#ifdef CONFIG_RFS_ACCEL
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 5a74fcbdbc2b..6b4d96635238 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -133,23 +133,12 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
+static void qede_schedule_recovery_handler(void *dev);
+static void qede_recovery_handler(struct qede_dev *edev);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
struct qed_generic_tlvs *data);
-/* The qede lock is used to protect driver state change and driver flows that
- * are not reentrant.
- */
-void __qede_lock(struct qede_dev *edev)
-{
- mutex_lock(&edev->qede_lock);
-}
-
-void __qede_unlock(struct qede_dev *edev)
-{
- mutex_unlock(&edev->qede_lock);
-}
-
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto)
@@ -231,6 +220,7 @@ static struct qed_eth_cb_ops qede_ll_ops = {
.arfs_filter_op = qede_arfs_filter_op,
#endif
.link_update = qede_link_update,
+ .schedule_recovery_handler = qede_schedule_recovery_handler,
.get_generic_tlv_data = qede_get_generic_tlv_data,
.get_protocol_tlv_data = qede_get_eth_tlv_data,
},
@@ -950,11 +940,57 @@ err:
return -ENOMEM;
}
+/* The qede lock is used to protect driver state change and driver flows that
+ * are not reentrant.
+ */
+void __qede_lock(struct qede_dev *edev)
+{
+ mutex_lock(&edev->qede_lock);
+}
+
+void __qede_unlock(struct qede_dev *edev)
+{
+ mutex_unlock(&edev->qede_lock);
+}
+
+/* This version of the lock should be used when acquiring the RTNL lock is also
+ * needed in addition to the internal qede lock.
+ */
+void qede_lock(struct qede_dev *edev)
+{
+ rtnl_lock();
+ __qede_lock(edev);
+}
+
+void qede_unlock(struct qede_dev *edev)
+{
+ __qede_unlock(edev);
+ rtnl_unlock();
+}
+
static void qede_sp_task(struct work_struct *work)
{
struct qede_dev *edev = container_of(work, struct qede_dev,
sp_task.work);
+ /* The locking scheme depends on the specific flag:
+ * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
+ * ensure that ongoing flows are ended and new ones are not started.
+ * In other cases - only the internal qede lock should be acquired.
+ */
+
+ if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
+#ifdef CONFIG_QED_SRIOV
+ /* SRIOV must be disabled outside the lock to avoid a deadlock.
+ * The recovery of the active VFs is currently not supported.
+ */
+ qede_sriov_configure(edev->pdev, 0);
+#endif
+ qede_lock(edev);
+ qede_recovery_handler(edev);
+ qede_unlock(edev);
+ }
+
__qede_lock(edev);
if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
@@ -1031,6 +1067,7 @@ static void qede_log_probe(struct qede_dev *edev)
enum qede_probe_mode {
QEDE_PROBE_NORMAL,
+ QEDE_PROBE_RECOVERY,
};
static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
@@ -1051,6 +1088,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
probe_params.dp_module = dp_module;
probe_params.dp_level = dp_level;
probe_params.is_vf = is_vf;
+ probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY);
cdev = qed_ops->common->probe(pdev, &probe_params);
if (!cdev) {
rc = -ENODEV;
@@ -1078,11 +1116,20 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
if (rc)
goto err2;
- edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
- dp_level);
- if (!edev) {
- rc = -ENOMEM;
- goto err2;
+ if (mode != QEDE_PROBE_RECOVERY) {
+ edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
+ dp_level);
+ if (!edev) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+ } else {
+ struct net_device *ndev = pci_get_drvdata(pdev);
+
+ edev = netdev_priv(ndev);
+ edev->cdev = cdev;
+ memset(&edev->stats, 0, sizeof(edev->stats));
+ memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
}
if (is_vf)
@@ -1090,28 +1137,31 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
qede_init_ndev(edev);
- rc = qede_rdma_dev_add(edev);
+ rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY));
if (rc)
goto err3;
- /* Prepare the lock prior to the registration of the netdev,
- * as once it's registered we might reach flows requiring it
- * [it's even possible to reach a flow needing it directly
- * from there, although it's unlikely].
- */
- INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
- mutex_init(&edev->qede_lock);
- rc = register_netdev(edev->ndev);
- if (rc) {
- DP_NOTICE(edev, "Cannot register net-device\n");
- goto err4;
+ if (mode != QEDE_PROBE_RECOVERY) {
+ /* Prepare the lock prior to the registration of the netdev,
+ * as once it's registered we might reach flows requiring it
+ * [it's even possible to reach a flow needing it directly
+ * from there, although it's unlikely].
+ */
+ INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
+ mutex_init(&edev->qede_lock);
+
+ rc = register_netdev(edev->ndev);
+ if (rc) {
+ DP_NOTICE(edev, "Cannot register net-device\n");
+ goto err4;
+ }
}
edev->ops->common->set_name(cdev, edev->ndev->name);
/* PTP not supported on VFs */
if (!is_vf)
- qede_ptp_enable(edev, true);
+ qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL));
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
@@ -1126,7 +1176,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
return 0;
err4:
- qede_rdma_dev_remove(edev);
+ qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
err3:
free_netdev(edev->ndev);
err2:
@@ -1162,6 +1212,7 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
enum qede_remove_mode {
QEDE_REMOVE_NORMAL,
+ QEDE_REMOVE_RECOVERY,
};
static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
@@ -1172,15 +1223,19 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
DP_INFO(edev, "Starting qede_remove\n");
- qede_rdma_dev_remove(edev);
- unregister_netdev(ndev);
- cancel_delayed_work_sync(&edev->sp_task);
+ qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));
- qede_ptp_disable(edev);
+ if (mode != QEDE_REMOVE_RECOVERY) {
+ unregister_netdev(ndev);
- edev->ops->common->set_power_state(cdev, PCI_D0);
+ cancel_delayed_work_sync(&edev->sp_task);
- pci_set_drvdata(pdev, NULL);
+ edev->ops->common->set_power_state(cdev, PCI_D0);
+
+ pci_set_drvdata(pdev, NULL);
+ }
+
+ qede_ptp_disable(edev);
/* Use global ops since we've freed edev */
qed_ops->common->slowpath_stop(cdev);
@@ -1194,7 +1249,8 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
* [e.g., QED register callbacks] won't break anything when
* accessing the netdevice.
*/
- free_netdev(ndev);
+ if (mode != QEDE_REMOVE_RECOVERY)
+ free_netdev(ndev);
dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}
@@ -1539,6 +1595,58 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
return 0;
}
+static void qede_empty_tx_queue(struct qede_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ struct netdev_queue *netdev_txq;
+ int rc, len = 0;
+
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
+
+ while (qed_chain_get_cons_idx(&txq->tx_pbl) !=
+ qed_chain_get_prod_idx(&txq->tx_pbl)) {
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
+ txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),
+ qed_chain_get_prod_idx(&txq->tx_pbl));
+
+ rc = qede_free_tx_pkt(edev, txq, &len);
+ if (rc) {
+ DP_NOTICE(edev,
+ "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
+ txq->index,
+ qed_chain_get_cons_idx(&txq->tx_pbl),
+ qed_chain_get_prod_idx(&txq->tx_pbl));
+ break;
+ }
+
+ bytes_compl += len;
+ pkts_compl++;
+ txq->sw_tx_cons++;
+ }
+
+ netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
+}
+
+static void qede_empty_tx_queues(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_queue(i)
+ if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ struct qede_fastpath *fp;
+
+ fp = &edev->fp_array[i];
+ qede_empty_tx_queue(edev,
+ &fp->txq[cos]);
+ }
+ }
+}
+
/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
@@ -2053,6 +2161,7 @@ out:
enum qede_unload_mode {
QEDE_UNLOAD_NORMAL,
+ QEDE_UNLOAD_RECOVERY,
};
static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
@@ -2068,7 +2177,8 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
- edev->state = QEDE_STATE_CLOSED;
+ if (mode != QEDE_UNLOAD_RECOVERY)
+ edev->state = QEDE_STATE_CLOSED;
qede_rdma_dev_event_close(edev);
@@ -2076,17 +2186,20 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
netif_tx_disable(edev->ndev);
netif_carrier_off(edev->ndev);
- /* Reset the link */
- memset(&link_params, 0, sizeof(link_params));
- link_params.link_up = false;
- edev->ops->common->set_link(edev->cdev, &link_params);
- rc = qede_stop_queues(edev);
- if (rc) {
- qede_sync_free_irqs(edev);
- goto out;
- }
+ if (mode != QEDE_UNLOAD_RECOVERY) {
+ /* Reset the link */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = false;
+ edev->ops->common->set_link(edev->cdev, &link_params);
- DP_INFO(edev, "Stopped Queues\n");
+ rc = qede_stop_queues(edev);
+ if (rc) {
+ qede_sync_free_irqs(edev);
+ goto out;
+ }
+
+ DP_INFO(edev, "Stopped Queues\n");
+ }
qede_vlan_mark_nonconfigured(edev);
edev->ops->fastpath_stop(edev->cdev);
@@ -2102,18 +2215,26 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
qede_napi_disable_remove(edev);
+ if (mode == QEDE_UNLOAD_RECOVERY)
+ qede_empty_tx_queues(edev);
+
qede_free_mem_load(edev);
qede_free_fp_array(edev);
out:
if (!is_locked)
__qede_unlock(edev);
+
+ if (mode != QEDE_UNLOAD_RECOVERY)
+ DP_NOTICE(edev, "Link is down\n");
+
DP_INFO(edev, "Ending qede unload\n");
}
enum qede_load_mode {
QEDE_LOAD_NORMAL,
QEDE_LOAD_RELOAD,
+ QEDE_LOAD_RECOVERY,
};
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
@@ -2293,6 +2414,77 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
}
}
+static void qede_schedule_recovery_handler(void *dev)
+{
+ struct qede_dev *edev = dev;
+
+ if (edev->state == QEDE_STATE_RECOVERY) {
+ DP_NOTICE(edev,
+ "Avoid scheduling a recovery handling since already in recovery state\n");
+ return;
+ }
+
+ set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+
+ DP_INFO(edev, "Scheduled a recovery handler\n");
+}
+
+static void qede_recovery_failed(struct qede_dev *edev)
+{
+ netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");
+
+ netif_device_detach(edev->ndev);
+
+ if (edev->cdev)
+ edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
+}
+
+static void qede_recovery_handler(struct qede_dev *edev)
+{
+ u32 curr_state = edev->state;
+ int rc;
+
+ DP_NOTICE(edev, "Starting a recovery process\n");
+
+ /* No need to acquire the qede_lock first, since qede_sp_task does so
+ * before calling this function.
+ */
+ edev->state = QEDE_STATE_RECOVERY;
+
+ edev->ops->common->recovery_prolog(edev->cdev);
+
+ if (curr_state == QEDE_STATE_OPEN)
+ qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);
+
+ __qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);
+
+ rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
+ IS_VF(edev), QEDE_PROBE_RECOVERY);
+ if (rc) {
+ edev->cdev = NULL;
+ goto err;
+ }
+
+ if (curr_state == QEDE_STATE_OPEN) {
+ rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
+ if (rc)
+ goto err;
+
+ qede_config_rx_mode(edev->ndev);
+ udp_tunnel_get_rx_info(edev->ndev);
+ }
+
+ edev->state = curr_state;
+
+ DP_NOTICE(edev, "Recovery handling is done\n");
+
+ return;
+
+err:
+ qede_recovery_failed(edev);
+}
+
static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
{
struct netdev_queue *netdev_txq;
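
qede_recovery_handler() above is effectively a remove/probe cycle with the netdev kept alive: unload in recovery mode if the device was open, __qede_remove(RECOVERY), __qede_probe(RECOVERY), then reload and restore the previous state; any failure detaches the device. A condensed model of that sequencing (hypothetical step functions, all stubbed to succeed):

#include <stdio.h>

enum state { CLOSED, OPEN, RECOVERY };

static int do_remove(void) { puts("remove (recovery)"); return 0; }
static int do_probe(void)  { puts("probe (recovery)");  return 0; }
static int do_unload(void) { puts("unload (recovery)"); return 0; }
static int do_load(void)   { puts("load (recovery)");   return 0; }

/* Models qede_recovery_handler(): the pre-recovery state is restored
 * only if every step succeeds; any failure detaches the device.
 */
static int recovery(enum state *st)
{
	enum state curr = *st;

	*st = RECOVERY;
	if (curr == OPEN)
		do_unload();
	do_remove();
	if (do_probe())
		goto err;
	if (curr == OPEN && do_load())
		goto err;
	*st = curr;
	puts("recovery done");
	return 0;
err:
	puts("recovery failed: power cycle needed");
	return -1;
}

int main(void)
{
	enum state st = OPEN;

	return recovery(&st);
}
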
diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
index 1900bf7e67d1..ffabc2d2f082 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
@@ -50,6 +50,8 @@ static void _qede_rdma_dev_add(struct qede_dev *edev)
if (!qedr_drv)
return;
+ /* Leftovers from previous error recovery */
+ edev->rdma_info.exp_recovery = false;
edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
edev->ndev);
}
@@ -87,21 +89,26 @@ static void qede_rdma_destroy_wq(struct qede_dev *edev)
destroy_workqueue(edev->rdma_info.rdma_wq);
}
-int qede_rdma_dev_add(struct qede_dev *edev)
+int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
{
- int rc = 0;
+ int rc;
- if (qede_rdma_supported(edev)) {
- rc = qede_rdma_create_wq(edev);
- if (rc)
- return rc;
+ if (!qede_rdma_supported(edev))
+ return 0;
- INIT_LIST_HEAD(&edev->rdma_info.entry);
- mutex_lock(&qedr_dev_list_lock);
- list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
- _qede_rdma_dev_add(edev);
- mutex_unlock(&qedr_dev_list_lock);
- }
+ /* Cannot start qedr while recovering since it wasn't fully stopped */
+ if (recovery)
+ return 0;
+
+ rc = qede_rdma_create_wq(edev);
+ if (rc)
+ return rc;
+
+ INIT_LIST_HEAD(&edev->rdma_info.entry);
+ mutex_lock(&qedr_dev_list_lock);
+ list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
+ _qede_rdma_dev_add(edev);
+ mutex_unlock(&qedr_dev_list_lock);
return rc;
}
@@ -110,19 +117,30 @@ static void _qede_rdma_dev_remove(struct qede_dev *edev)
{
if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
qedr_drv->remove(edev->rdma_info.qedr_dev);
- edev->rdma_info.qedr_dev = NULL;
}
-void qede_rdma_dev_remove(struct qede_dev *edev)
+void qede_rdma_dev_remove(struct qede_dev *edev, bool recovery)
{
if (!qede_rdma_supported(edev))
return;
- qede_rdma_destroy_wq(edev);
- mutex_lock(&qedr_dev_list_lock);
- _qede_rdma_dev_remove(edev);
- list_del(&edev->rdma_info.entry);
- mutex_unlock(&qedr_dev_list_lock);
+ /* Cannot remove qedr while recovering since it wasn't fully stopped */
+ if (!recovery) {
+ qede_rdma_destroy_wq(edev);
+ mutex_lock(&qedr_dev_list_lock);
+ if (!edev->rdma_info.exp_recovery)
+ _qede_rdma_dev_remove(edev);
+ edev->rdma_info.qedr_dev = NULL;
+ list_del(&edev->rdma_info.entry);
+ mutex_unlock(&qedr_dev_list_lock);
+ } else {
+ if (!edev->rdma_info.exp_recovery) {
+ mutex_lock(&qedr_dev_list_lock);
+ _qede_rdma_dev_remove(edev);
+ mutex_unlock(&qedr_dev_list_lock);
+ }
+ edev->rdma_info.exp_recovery = true;
+ }
}
static void _qede_rdma_dev_open(struct qede_dev *edev)
@@ -204,7 +222,8 @@ void qede_rdma_unregister_driver(struct qedr_driver *drv)
mutex_lock(&qedr_dev_list_lock);
list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
- if (edev->rdma_info.qedr_dev)
+ /* If device has experienced recovery it was already removed */
+ if (edev->rdma_info.qedr_dev && !edev->rdma_info.exp_recovery)
_qede_rdma_dev_remove(edev);
}
qedr_drv = NULL;
@@ -284,6 +303,10 @@ static void qede_rdma_add_event(struct qede_dev *edev,
{
struct qede_rdma_event_work *event_node;
+ /* If a recovery was experienced avoid adding the event */
+ if (edev->rdma_info.exp_recovery)
+ return;
+
if (!edev->rdma_info.qedr_dev)
return;
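
The exp_recovery flag used above guarantees qedr is torn down exactly once per recovery and that RDMA events are dropped until the device is re-added. The invariant in miniature (simplified user-space model, no locking):

#include <stdio.h>
#include <stdbool.h>

static bool exp_recovery;
static bool qedr_present = true;

/* Models qede_rdma_dev_remove(edev, recovery=true): remove qedr only on
 * the first recovery pass, then remember that it is gone.
 */
static void rdma_remove_on_recovery(void)
{
	if (!exp_recovery && qedr_present) {
		puts("removing qedr");
		qedr_present = false;
	}
	exp_recovery = true;
}

/* Models qede_rdma_add_event(): drop events while in recovery. */
static void rdma_add_event(const char *ev)
{
	if (exp_recovery || !qedr_present)
		return;
	printf("queued event %s\n", ev);
}

int main(void)
{
	rdma_add_event("open");		/* queued */
	rdma_remove_on_recovery();	/* removes qedr */
	rdma_remove_on_recovery();	/* no-op */
	rdma_add_event("close");	/* dropped */
	return 0;
}
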
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 16d0479f6891..7a873002e626 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -396,7 +396,8 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *netdev,
- const unsigned char *addr, u16 vid, u16 flags)
+ const unsigned char *addr, u16 vid, u16 flags,
+ struct netlink_ext_ack *extack)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err = 0;
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index 7e011c1c1e6e..cfb67b746595 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -454,14 +454,14 @@ static void hardware_init(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
long ioaddr = dev->base_addr;
- int i;
+ int i;
/* Turn off the printer multiplexer on the 8012. */
for (i = 0; i < 8; i++)
outb(mux_8012[i], ioaddr + PAR_DATA);
write_reg_high(ioaddr, CMR1, CMR1h_RESET);
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 6; i++)
write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
write_reg_high(ioaddr, CMR2, lp->addr_mode);
@@ -471,15 +471,15 @@ static void hardware_init(struct net_device *dev)
(read_nibble(ioaddr, CMR2_h) >> 3) & 0x0f);
}
- write_reg(ioaddr, CMR2, CMR2_IRQOUT);
- write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT);
+ write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);
/* Enable the interrupt line from the serial port. */
outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
/* Unmask the interesting interrupts. */
- write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
- write_reg_high(ioaddr, IMR, ISRh_RxErr);
+ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
+ write_reg_high(ioaddr, IMR, ISRh_RxErr);
lp->tx_unit_busy = 0;
lp->pac_cnt_in_tx_buf = 0;
@@ -610,10 +610,12 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance)
write_reg(ioaddr, CMR2, CMR2_NULL);
write_reg(ioaddr, IMR, 0);
- if (net_debug > 5) printk(KERN_DEBUG "%s: In interrupt ", dev->name);
- while (--boguscount > 0) {
+ if (net_debug > 5)
+ printk(KERN_DEBUG "%s: In interrupt ", dev->name);
+ while (--boguscount > 0) {
int status = read_nibble(ioaddr, ISR);
- if (net_debug > 5) printk("loop status %02x..", status);
+ if (net_debug > 5)
+ printk("loop status %02x..", status);
if (status & (ISR_RxOK<<3)) {
handled = 1;
@@ -640,7 +642,8 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance)
} while (--boguscount > 0);
} else if (status & ((ISR_TxErr + ISR_TxOK)<<3)) {
handled = 1;
- if (net_debug > 6) printk("handling Tx done..");
+ if (net_debug > 6)
+ printk("handling Tx done..");
/* Clear the Tx interrupt. We should check for too many failures
and reinitialize the adapter. */
write_reg(ioaddr, ISR, ISR_TxErr + ISR_TxOK);
@@ -680,7 +683,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance)
break;
} else
break;
- }
+ }
/* This following code fixes a rare (and very difficult to track down)
problem where the adapter forgets its ethernet address. */
@@ -694,7 +697,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance)
}
/* Tell the adapter that it can go back to using the output line as IRQ. */
- write_reg(ioaddr, CMR2, CMR2_IRQOUT);
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT);
/* Enable the physical interrupt line, which is sure to be low until.. */
outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
/* .. we enable the interrupt sources. */
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index abb94c543aa2..3e650bd9e94b 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -229,7 +229,6 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
-static int use_dac = -1;
static struct {
u32 msg_enable;
} debug = { -1 };
@@ -639,6 +638,7 @@ struct rtl8169_private {
void __iomem *mmio_addr; /* memory map physical address */
struct pci_dev *pci_dev;
struct net_device *dev;
+ struct phy_device *phydev;
struct napi_struct napi;
u32 msg_enable;
u16 mac_version;
@@ -679,12 +679,12 @@ struct rtl8169_private {
} wk;
unsigned supports_gmii:1;
- struct mii_bus *mii_bus;
dma_addr_t counters_phys_addr;
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
u32 saved_wolopts;
+ const char *fw_name;
struct rtl_fw {
const struct firmware *fw;
@@ -697,15 +697,12 @@ struct rtl8169_private {
size_t size;
} phy_action;
} *rtl_fw;
-#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
u32 ocp_base;
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
-module_param(use_dac, int, 0);
-MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_SOFTDEP("pre: realtek");
@@ -745,6 +742,16 @@ static void rtl_unlock_work(struct rtl8169_private *tp)
mutex_unlock(&tp->wk.mutex);
}
+static void rtl_lock_config_regs(struct rtl8169_private *tp)
+{
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+}
+
+static void rtl_unlock_config_regs(struct rtl8169_private *tp)
+{
+ RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+}
+
static void rtl_tx_performance_tweak(struct rtl8169_private *tp, u16 force)
{
pcie_capability_clear_and_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
@@ -1278,11 +1285,6 @@ static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
-static u16 rtl_get_events(struct rtl8169_private *tp)
-{
- return RTL_R16(tp, IntrStatus);
-}
-
static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
{
RTL_W16(tp, IntrStatus, bits);
@@ -1313,7 +1315,7 @@ static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
struct net_device *dev = tp->dev;
- struct phy_device *phydev = dev->phydev;
+ struct phy_device *phydev = tp->phydev;
if (!netif_running(dev))
return;
@@ -1431,7 +1433,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
};
u8 options;
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+ rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
@@ -1479,7 +1481,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
}
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ rtl_lock_config_regs(tp);
device_set_wakeup_enable(tp_to_dev(tp), wolopts);
}
@@ -1508,11 +1510,6 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return 0;
}
-static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
-{
- return rtl_chip_infos[tp->mac_version].fw_name;
-}
-
static void rtl8169_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -1522,7 +1519,7 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
strlcpy(info->driver, MODULENAME, sizeof(info->driver));
strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
- if (!IS_ERR_OR_NULL(rtl_fw))
+ if (rtl_fw)
strlcpy(info->fw_version, rtl_fw->version,
sizeof(info->fw_version));
}
@@ -1987,6 +1984,196 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
return 0;
}
+static int rtl_get_eee_supp(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+ int ret;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_35:
+ case RTL_GIGA_MAC_VER_36:
+ case RTL_GIGA_MAC_VER_38:
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
+ break;
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ phy_write(phydev, 0x1f, 0x0a5c);
+ ret = phy_read(phydev, 0x12);
+ phy_write(phydev, 0x1f, 0x0000);
+ break;
+ default:
+ ret = -EPROTONOSUPPORT;
+ break;
+ }
+
+ return ret;
+}
+
+static int rtl_get_eee_lpadv(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+ int ret;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_35:
+ case RTL_GIGA_MAC_VER_36:
+ case RTL_GIGA_MAC_VER_38:
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
+ break;
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ phy_write(phydev, 0x1f, 0x0a5d);
+ ret = phy_read(phydev, 0x11);
+ phy_write(phydev, 0x1f, 0x0000);
+ break;
+ default:
+ ret = -EPROTONOSUPPORT;
+ break;
+ }
+
+ return ret;
+}
+
+static int rtl_get_eee_adv(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+ int ret;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_35:
+ case RTL_GIGA_MAC_VER_36:
+ case RTL_GIGA_MAC_VER_38:
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
+ break;
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ phy_write(phydev, 0x1f, 0x0a5d);
+ ret = phy_read(phydev, 0x10);
+ phy_write(phydev, 0x1f, 0x0000);
+ break;
+ default:
+ ret = -EPROTONOSUPPORT;
+ break;
+ }
+
+ return ret;
+}
+
+static int rtl_set_eee_adv(struct rtl8169_private *tp, int val)
+{
+ struct phy_device *phydev = tp->phydev;
+ int ret = 0;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_35:
+ case RTL_GIGA_MAC_VER_36:
+ case RTL_GIGA_MAC_VER_38:
+ ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
+ break;
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ phy_write(phydev, 0x1f, 0x0a5d);
+ phy_write(phydev, 0x10, val);
+ phy_write(phydev, 0x1f, 0x0000);
+ break;
+ default:
+ ret = -EPROTONOSUPPORT;
+ break;
+ }
+
+ return ret;
+}
+
+static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct device *d = tp_to_dev(tp);
+ int ret;
+
+ pm_runtime_get_noresume(d);
+
+ if (!pm_runtime_active(d)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* Get Supported EEE */
+ ret = rtl_get_eee_supp(tp);
+ if (ret < 0)
+ goto out;
+ data->supported = mmd_eee_cap_to_ethtool_sup_t(ret);
+
+ /* Get advertisement EEE */
+ ret = rtl_get_eee_adv(tp);
+ if (ret < 0)
+ goto out;
+ data->advertised = mmd_eee_adv_to_ethtool_adv_t(ret);
+ data->eee_enabled = !!data->advertised;
+
+ /* Get LP advertisement EEE */
+ ret = rtl_get_eee_lpadv(tp);
+ if (ret < 0)
+ goto out;
+ data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(ret);
+ data->eee_active = !!(data->advertised & data->lp_advertised);
+out:
+ pm_runtime_put_noidle(d);
+ return ret < 0 ? ret : 0;
+}
+
+static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct device *d = tp_to_dev(tp);
+ int old_adv, adv = 0, cap, ret;
+
+ pm_runtime_get_noresume(d);
+
+ if (!dev->phydev || !pm_runtime_active(d)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (dev->phydev->autoneg == AUTONEG_DISABLE ||
+ dev->phydev->duplex != DUPLEX_FULL) {
+ ret = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ /* Get Supported EEE */
+ ret = rtl_get_eee_supp(tp);
+ if (ret < 0)
+ goto out;
+ cap = ret;
+
+ ret = rtl_get_eee_adv(tp);
+ if (ret < 0)
+ goto out;
+ old_adv = ret;
+
+ if (data->eee_enabled) {
+ adv = !data->advertised ? cap :
+ ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap;
+ /* Mask prohibited EEE modes */
+ adv &= ~dev->phydev->eee_broken_modes;
+ }
+
+ if (old_adv != adv) {
+ ret = rtl_set_eee_adv(tp, adv);
+ if (ret < 0)
+ goto out;
+
+ /* Restart autonegotiation so the new modes get sent to the
+ * link partner.
+ */
+ ret = phy_restart_aneg(dev->phydev);
+ }
+
+out:
+ pm_runtime_put_noidle(d);
+ return ret < 0 ? ret : 0;
+}
+
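rtl8169_set_eee() above derives the new advertisement from the PHY's capability mask, the user's request, and the PHY's broken-modes mask. A hedged, self-contained restatement of just that computation (the helper name is illustrative; ethtool_adv_to_mmd_eee_adv_t() is the standard <linux/mdio.h> translation the patch already uses):

static u16 example_eee_adv(const struct ethtool_eee *data, u16 cap,
			   u32 eee_broken_modes)
{
	u16 adv = 0;

	if (data->eee_enabled) {
		/* An empty request means "advertise everything we support". */
		adv = data->advertised ?
		      ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap :
		      cap;
		/* Mask prohibited EEE modes, as in rtl8169_set_eee(). */
		adv &= ~eee_broken_modes;
	}

	return adv;
}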
static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_drvinfo = rtl8169_get_drvinfo,
.get_regs_len = rtl8169_get_regs_len,
@@ -2003,10 +2190,20 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_ethtool_stats = rtl8169_get_ethtool_stats,
.get_ts_info = ethtool_op_get_ts_info,
.nway_reset = phy_ethtool_nway_reset,
+ .get_eee = rtl8169_get_eee,
+ .set_eee = rtl8169_set_eee,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
+static void rtl_enable_eee(struct rtl8169_private *tp)
+{
+ int supported = rtl_get_eee_supp(tp);
+
+ if (supported > 0)
+ rtl_set_eee_adv(tp, supported);
+}
+
static void rtl8169_get_mac_version(struct rtl8169_private *tp)
{
/*
@@ -2199,7 +2396,7 @@ static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
if (fw->size % FW_OPCODE_SIZE)
goto out;
- strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);
+ strlcpy(version, tp->fw_name, RTL_VER_SIZE);
pa->code = (__le32 *)fw->data;
pa->size = fw->size / FW_OPCODE_SIZE;
@@ -2374,20 +2571,18 @@ static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
static void rtl_release_firmware(struct rtl8169_private *tp)
{
- if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
+ if (tp->rtl_fw) {
release_firmware(tp->rtl_fw->fw);
kfree(tp->rtl_fw);
+ tp->rtl_fw = NULL;
}
- tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
}
static void rtl_apply_firmware(struct rtl8169_private *tp)
{
- struct rtl_fw *rtl_fw = tp->rtl_fw;
-
/* TODO: release firmware once rtl_phy_write_fw signals failures. */
- if (!IS_ERR_OR_NULL(rtl_fw))
- rtl_phy_write_fw(tp, rtl_fw);
+ if (tp->rtl_fw)
+ rtl_phy_write_fw(tp, tp->rtl_fw);
}
static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
@@ -2398,6 +2593,33 @@ static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
rtl_apply_firmware(tp);
}
+static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
+{
+ rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0003, 0x0000, ERIAR_EXGMAC);
+}
+
+static void rtl8168f_config_eee_phy(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+
+ phy_write(phydev, 0x1f, 0x0007);
+ phy_write(phydev, 0x1e, 0x0020);
+ phy_set_bits(phydev, 0x15, BIT(8));
+
+ phy_write(phydev, 0x1f, 0x0005);
+ phy_write(phydev, 0x05, 0x8b85);
+ phy_set_bits(phydev, 0x06, BIT(13));
+
+ phy_write(phydev, 0x1f, 0x0000);
+}
+
+static void rtl8168g_config_eee_phy(struct rtl8169_private *tp)
+{
+ phy_write(tp->phydev, 0x1f, 0x0a43);
+ phy_set_bits(tp->phydev, 0x11, BIT(4));
+ phy_write(tp->phydev, 0x1f, 0x0000);
+}
+
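The EEE config helpers above all follow the same Realtek paged-access idiom: select a vendor page via register 0x1f, modify a register on that page, then return to page 0. A hedged generic sketch of that idiom (the helper name is an assumption, not part of the patch):

static void example_phy_paged_set_bits(struct phy_device *phydev,
				       u16 page, u32 regnum, u16 bits)
{
	phy_write(phydev, 0x1f, page);		/* select vendor page */
	phy_set_bits(phydev, regnum, bits);	/* read-modify-write */
	phy_write(phydev, 0x1f, 0x0000);	/* back to the standard page */
}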
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -3165,22 +3387,8 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
- /* EEE setting */
- rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0003, 0x0000, ERIAR_EXGMAC);
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x2000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0004);
- rtl_writephy(tp, 0x1f, 0x0007);
- rtl_writephy(tp, 0x1e, 0x0020);
- rtl_w0w1_phy(tp, 0x15, 0x0100, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x0d, 0x0007);
- rtl_writephy(tp, 0x0e, 0x003c);
- rtl_writephy(tp, 0x0d, 0x4007);
- rtl_writephy(tp, 0x0e, 0x0006);
- rtl_writephy(tp, 0x0d, 0x0000);
+ rtl8168f_config_eee_phy(tp);
+ rtl_enable_eee(tp);
/* Green feature */
rtl_writephy(tp, 0x1f, 0x0003);
@@ -3215,6 +3423,9 @@ static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x05, 0x8b86);
rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
+
+ rtl8168f_config_eee_phy(tp);
+ rtl_enable_eee(tp);
}
static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3348,22 +3559,6 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
rtl_w0w1_phy(tp, 0x06, 0x8000, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
- /* eee setting */
- rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
- rtl_writephy(tp, 0x1f, 0x0004);
- rtl_writephy(tp, 0x1f, 0x0007);
- rtl_writephy(tp, 0x1e, 0x0020);
- rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100);
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x0d, 0x0007);
- rtl_writephy(tp, 0x0e, 0x003c);
- rtl_writephy(tp, 0x0d, 0x4007);
- rtl_writephy(tp, 0x0e, 0x0000);
- rtl_writephy(tp, 0x0d, 0x0000);
-
/* Green feature */
rtl_writephy(tp, 0x1f, 0x0003);
rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001);
@@ -3371,6 +3566,30 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
}
+static void rtl8168g_disable_aldps(struct rtl8169_private *tp)
+{
+ phy_write(tp->phydev, 0x1f, 0x0a43);
+ phy_clear_bits(tp->phydev, 0x10, BIT(2));
+}
+
+static void rtl8168g_phy_adjust_10m_aldps(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+
+ phy_write(phydev, 0x1f, 0x0bcc);
+ phy_clear_bits(phydev, 0x14, BIT(8));
+
+ phy_write(phydev, 0x1f, 0x0a44);
+ phy_set_bits(phydev, 0x11, BIT(7) | BIT(6));
+
+ phy_write(phydev, 0x1f, 0x0a43);
+ phy_write(phydev, 0x13, 0x8084);
+ phy_clear_bits(phydev, 0x14, BIT(14) | BIT(13));
+ phy_set_bits(phydev, 0x10, BIT(12) | BIT(1) | BIT(0));
+
+ phy_write(phydev, 0x1f, 0x0000);
+}
+
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
{
rtl_apply_firmware(tp);
@@ -3397,14 +3616,7 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0a44);
rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0bcc);
- rtl_w0w1_phy(tp, 0x14, 0x0100, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0a44);
- rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x8084);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000);
- rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000);
+ rtl8168g_phy_adjust_10m_aldps(tp);
/* EEE auto-fallback function */
rtl_writephy(tp, 0x1f, 0x0a4b);
@@ -3429,17 +3641,16 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x14, 0x9065);
rtl_writephy(tp, 0x14, 0x1065);
- /* Check ALDPS bit, disable it if enabled */
- rtl_writephy(tp, 0x1f, 0x0a43);
- if (rtl_readphy(tp, 0x10) & 0x0004)
- rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);
-
- rtl_writephy(tp, 0x1f, 0x0000);
+ rtl8168g_disable_aldps(tp);
+ rtl8168g_config_eee_phy(tp);
+ rtl_enable_eee(tp);
}
static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
{
rtl_apply_firmware(tp);
+ rtl8168g_config_eee_phy(tp);
+ rtl_enable_eee(tp);
}
static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3544,12 +3755,9 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080);
rtl_writephy(tp, 0x1f, 0x0000);
- /* Check ALDPS bit, disable it if enabled */
- rtl_writephy(tp, 0x1f, 0x0a43);
- if (rtl_readphy(tp, 0x10) & 0x0004)
- rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);
-
- rtl_writephy(tp, 0x1f, 0x0000);
+ rtl8168g_disable_aldps(tp);
+ rtl8168g_config_eee_phy(tp);
+ rtl_enable_eee(tp);
}
static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
@@ -3617,12 +3825,9 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080);
rtl_writephy(tp, 0x1f, 0x0000);
- /* Check ALDPS bit, disable it if enabled */
- rtl_writephy(tp, 0x1f, 0x0a43);
- if (rtl_readphy(tp, 0x10) & 0x0004)
- rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);
-
- rtl_writephy(tp, 0x1f, 0x0000);
+ rtl8168g_disable_aldps(tp);
+ rtl8168g_config_eee_phy(tp);
+ rtl_enable_eee(tp);
}
static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3632,16 +3837,7 @@ static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
- /* patch 10M & ALDPS */
- rtl_writephy(tp, 0x1f, 0x0bcc);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100);
- rtl_writephy(tp, 0x1f, 0x0a44);
- rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x8084);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000);
- rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ rtl8168g_phy_adjust_10m_aldps(tp);
/* Enable EEE auto-fallback function */
rtl_writephy(tp, 0x1f, 0x0a4b);
@@ -3659,26 +3855,14 @@ static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000);
rtl_writephy(tp, 0x1f, 0x0000);
- /* Check ALDPS bit, disable it if enabled */
- rtl_writephy(tp, 0x1f, 0x0a43);
- if (rtl_readphy(tp, 0x10) & 0x0004)
- rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);
-
- rtl_writephy(tp, 0x1f, 0x0000);
+ rtl8168g_disable_aldps(tp);
+ rtl8168g_config_eee_phy(tp);
+ rtl_enable_eee(tp);
}
static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
{
- /* patch 10M & ALDPS */
- rtl_writephy(tp, 0x1f, 0x0bcc);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100);
- rtl_writephy(tp, 0x1f, 0x0a44);
- rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x8084);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000);
- rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ rtl8168g_phy_adjust_10m_aldps(tp);
/* Enable UC LPF tune function */
rtl_writephy(tp, 0x1f, 0x0a43);
@@ -3750,12 +3934,9 @@ static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x14, 0x1065);
rtl_writephy(tp, 0x1f, 0x0000);
- /* Check ALDPS bit, disable it if enabled */
- rtl_writephy(tp, 0x1f, 0x0a43);
- if (rtl_readphy(tp, 0x10) & 0x0004)
- rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);
-
- rtl_writephy(tp, 0x1f, 0x0000);
+ rtl8168g_disable_aldps(tp);
+ rtl8168g_config_eee_phy(tp);
+ rtl_enable_eee(tp);
}
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
@@ -3994,24 +4175,24 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
}
/* We may have called phy_speed_down before */
- phy_speed_up(dev->phydev);
+ phy_speed_up(tp->phydev);
- genphy_soft_reset(dev->phydev);
+ genphy_soft_reset(tp->phydev);
/* It was reported that several chips end up with 10MBit/Half on a
* 1GBit link after resuming from S3. For whatever reason the PHY on
* these chips doesn't properly start a renegotiation when soft-reset.
* Explicitly requesting a renegotiation fixes this.
*/
- if (dev->phydev->autoneg == AUTONEG_ENABLE)
- phy_restart_aneg(dev->phydev);
+ if (tp->phydev->autoneg == AUTONEG_ENABLE)
+ phy_restart_aneg(tp->phydev);
}
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
rtl_lock_work(tp);
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+ rtl_unlock_config_regs(tp);
RTL_W32(tp, MAC4, addr[4] | addr[5] << 8);
RTL_R32(tp, MAC4);
@@ -4022,7 +4203,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
if (tp->mac_version == RTL_GIGA_MAC_VER_34)
rtl_rar_exgmac_set(tp, addr);
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ rtl_lock_config_regs(tp);
rtl_unlock_work(tp);
}
@@ -4049,10 +4230,12 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
if (!netif_running(dev))
return -ENODEV;
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
+ return phy_mii_ioctl(tp->phydev, ifr, cmd);
}
static void rtl_init_mdio_ops(struct rtl8169_private *tp)
@@ -4101,15 +4284,10 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
{
- struct phy_device *phydev;
-
if (!__rtl8169_get_wol(tp))
return false;
- /* phydev may not be attached to netdevice */
- phydev = mdiobus_get_phy(tp->mii_bus, 0);
-
- phy_speed_down(phydev, false);
+ phy_speed_down(tp->phydev, false);
rtl_wol_suspend_quirk(tp);
return true;
@@ -4178,7 +4356,7 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
break;
}
- phy_resume(tp->dev->phydev);
+ phy_resume(tp->phydev);
/* give MAC/PHY some time to resume */
msleep(20);
}
@@ -4234,18 +4412,18 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
if (tp->jumbo_ops.enable) {
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+ rtl_unlock_config_regs(tp);
tp->jumbo_ops.enable(tp);
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ rtl_lock_config_regs(tp);
}
}
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
if (tp->jumbo_ops.disable) {
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+ rtl_unlock_config_regs(tp);
tp->jumbo_ops.disable(tp);
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ rtl_lock_config_regs(tp);
}
}
@@ -4378,21 +4556,20 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
-static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
+static void rtl_request_firmware(struct rtl8169_private *tp)
{
struct rtl_fw *rtl_fw;
- const char *name;
int rc = -ENOMEM;
- name = rtl_lookup_firmware_name(tp);
- if (!name)
- goto out_no_firmware;
+ /* firmware loaded already or no firmware available */
+ if (tp->rtl_fw || !tp->fw_name)
+ return;
rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
if (!rtl_fw)
goto err_warn;
- rc = request_firmware(&rtl_fw->fw, name, tp_to_dev(tp));
+ rc = request_firmware(&rtl_fw->fw, tp->fw_name, tp_to_dev(tp));
if (rc < 0)
goto err_free;
@@ -4401,7 +4578,7 @@ static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
goto err_release_firmware;
tp->rtl_fw = rtl_fw;
-out:
+
return;
err_release_firmware:
@@ -4410,16 +4587,7 @@ err_free:
kfree(rtl_fw);
err_warn:
netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
- name, rc);
-out_no_firmware:
- tp->rtl_fw = NULL;
- goto out;
-}
-
-static void rtl_request_firmware(struct rtl8169_private *tp)
-{
- if (IS_ERR(tp->rtl_fw))
- rtl_request_uncached_firmware(tp);
+ tp->fw_name, rc);
}
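After this rework the firmware handling has plain NULL-based semantics: tp->rtl_fw is either a fully loaded image or NULL, and rtl_request_firmware() is idempotent. A sketch of the resulting lifecycle, using only functions from this patch:

static void example_firmware_lifecycle(struct rtl8169_private *tp)
{
	rtl_request_firmware(tp);	/* no-op if cached or tp->fw_name is NULL */
	rtl_apply_firmware(tp);		/* no-op while tp->rtl_fw is NULL */
	rtl_release_firmware(tp);	/* frees and resets tp->rtl_fw to NULL */
}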
static void rtl_rx_close(struct rtl8169_private *tp)
@@ -4566,13 +4734,13 @@ static void rtl_set_rx_mode(struct net_device *dev)
static void rtl_hw_start(struct rtl8169_private *tp)
{
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+ rtl_unlock_config_regs(tp);
tp->hw_start(tp);
rtl_set_rx_max_size(tp);
rtl_set_rx_tx_desc_registers(tp);
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ rtl_lock_config_regs(tp);
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
RTL_R8(tp, IntrMask);
@@ -4696,18 +4864,10 @@ static void rtl_enable_clock_request(struct rtl8169_private *tp)
PCI_EXP_LNKCTL_CLKREQ_EN);
}
-static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
+static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp)
{
- u8 data;
-
- data = RTL_R8(tp, Config3);
-
- if (enable)
- data |= Rdy_to_L23;
- else
- data &= ~Rdy_to_L23;
-
- RTL_W8(tp, Config3, data);
+ /* work around an issue when PCI reset occurs during L2/L3 state */
+ RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23);
}
static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
@@ -4965,6 +5125,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
/* Adjust EEE LED frequency */
RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ rtl8168_config_eee_mac(tp);
+
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
@@ -4997,6 +5159,8 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
+
+ rtl8168_config_eee_mac(tp);
}
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
@@ -5028,7 +5192,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
};
rtl_hw_start_8168f(tp);
- rtl_pcie_state_l2l3_enable(tp, false);
+ rtl_pcie_state_l2l3_disable(tp);
rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
@@ -5059,10 +5223,12 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
/* Adjust EEE LED frequency */
RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ rtl8168_config_eee_mac(tp);
+
rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
- rtl_pcie_state_l2l3_enable(tp, false);
+ rtl_pcie_state_l2l3_disable(tp);
}
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
@@ -5161,6 +5327,8 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
/* Adjust EEE LED frequency */
RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ rtl8168_config_eee_mac(tp);
+
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
@@ -5168,7 +5336,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
- rtl_pcie_state_l2l3_enable(tp, false);
+ rtl_pcie_state_l2l3_disable(tp);
rtl_writephy(tp, 0x1f, 0x0c42);
rg_saw_cnt = (rtl_readphy(tp, 0x13) & 0x3fff);
@@ -5241,11 +5409,13 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
/* Adjust EEE LED frequency */
RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ rtl8168_config_eee_mac(tp);
+
rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
- rtl_pcie_state_l2l3_enable(tp, false);
+ rtl_pcie_state_l2l3_disable(tp);
}
static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
@@ -5516,7 +5686,7 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
- rtl_pcie_state_l2l3_enable(tp, false);
+ rtl_pcie_state_l2l3_disable(tp);
}
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
@@ -5551,7 +5721,7 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
- rtl_pcie_state_l2l3_enable(tp, false);
+ rtl_pcie_state_l2l3_disable(tp);
}
static void rtl_hw_start_8106(struct rtl8169_private *tp)
@@ -5565,7 +5735,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
- rtl_pcie_state_l2l3_enable(tp, false);
+ rtl_pcie_state_l2l3_disable(tp);
rtl_hw_aspm_clkreq_enable(tp, true);
}
@@ -5665,11 +5835,6 @@ static inline void rtl8169_mark_to_asic(struct RxDesc *desc)
desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE);
}
-static inline void *rtl8169_align(void *data)
-{
- return (void *)ALIGN((long)data, 16);
-}
-
static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
struct RxDesc *desc)
{
@@ -5682,15 +5847,13 @@ static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
if (!data)
return NULL;
- if (rtl8169_align(data) != data) {
- kfree(data);
- data = kmalloc_node(R8169_RX_BUF_SIZE + 15, GFP_KERNEL, node);
- if (!data)
- return NULL;
+ /* Memory should be properly aligned, but better to check anyway. */
+ if (!IS_ALIGNED((unsigned long)data, 8)) {
+ netdev_err_once(tp->dev, "RX buffer not 8-byte-aligned\n");
+ goto err_out;
}
- mapping = dma_map_single(d, rtl8169_align(data), R8169_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
+ mapping = dma_map_single(d, data, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(d, mapping))) {
if (net_ratelimit())
netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
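The removed realignment dance assumed kmalloc could return unaligned memory; the new code instead asserts the guarantee. A minimal sketch of the check (that ARCH_KMALLOC_MINALIGN is at least 8 on the relevant platforms is the assumption the patch itself makes):

#include <linux/kernel.h>
#include <linux/slab.h>

static bool example_rx_buf_aligned(const void *data)
{
	/* kmalloc'ed buffers are at least ARCH_KMALLOC_MINALIGN aligned,
	 * so this should never fail; it is only a cheap sanity check.
	 */
	return IS_ALIGNED((unsigned long)data, 8);
}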
@@ -6193,16 +6356,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
- /* The infamous DAC f*ckup only happens at boot time */
- if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) {
- netif_info(tp, intr, dev, "disabling PCI DAC\n");
- tp->cp_cmd &= ~PCIDAC;
- RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- dev->features &= ~NETIF_F_HIGHDMA;
- }
-
- rtl8169_hw_reset(tp);
-
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
@@ -6298,7 +6451,6 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
struct sk_buff *skb;
struct device *d = tp_to_dev(tp);
- data = rtl8169_align(data);
dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
prefetch(data);
skb = napi_alloc_skb(&tp->napi, pkt_size);
@@ -6409,7 +6561,7 @@ release_descriptor:
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
struct rtl8169_private *tp = dev_instance;
- u16 status = rtl_get_events(tp);
+ u16 status = RTL_R16(tp, IntrStatus);
u16 irq_mask = RTL_R16(tp, IntrMask);
if (status == 0xffff || !(status & irq_mask))
@@ -6420,8 +6572,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
goto out;
}
- if (status & LinkChg && tp->dev->phydev)
- phy_mac_interrupt(tp->dev->phydev);
+ if (status & LinkChg)
+ phy_mac_interrupt(tp->phydev);
if (unlikely(status & RxFIFOOver &&
tp->mac_version == RTL_GIGA_MAC_VER_11)) {
@@ -6512,12 +6664,12 @@ static void r8169_phylink_handler(struct net_device *ndev)
}
if (net_ratelimit())
- phy_print_status(ndev->phydev);
+ phy_print_status(tp->phydev);
}
static int r8169_phy_connect(struct rtl8169_private *tp)
{
- struct phy_device *phydev = mdiobus_get_phy(tp->mii_bus, 0);
+ struct phy_device *phydev = tp->phydev;
phy_interface_t phy_mode;
int ret;
@@ -6544,7 +6696,7 @@ static void rtl8169_down(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- phy_stop(dev->phydev);
+ phy_stop(tp->phydev);
napi_disable(&tp->napi);
netif_stop_queue(dev);
@@ -6586,7 +6738,7 @@ static int rtl8169_close(struct net_device *dev)
cancel_work_sync(&tp->wk.work);
- phy_disconnect(dev->phydev);
+ phy_disconnect(tp->phydev);
pci_free_irq(pdev, 0, tp);
@@ -6637,10 +6789,6 @@ static int rtl_open(struct net_device *dev)
if (retval < 0)
goto err_free_rx_1;
- INIT_WORK(&tp->wk.work, rtl_task);
-
- smp_mb();
-
rtl_request_firmware(tp);
retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp,
@@ -6667,7 +6815,7 @@ static int rtl_open(struct net_device *dev)
if (!rtl8169_init_counter_offsets(tp))
netif_warn(tp, hw, dev, "counter reset/update failed\n");
- phy_start(dev->phydev);
+ phy_start(tp->phydev);
netif_start_queue(dev);
rtl_unlock_work(tp);
@@ -6756,7 +6904,7 @@ static void rtl8169_net_suspend(struct net_device *dev)
if (!netif_running(dev))
return;
- phy_stop(dev->phydev);
+ phy_stop(tp->phydev);
netif_device_detach(dev);
rtl_lock_work(tp);
@@ -6791,14 +6939,13 @@ static void __rtl8169_resume(struct net_device *dev)
rtl_pll_power_up(tp);
rtl8169_init_phy(dev, tp);
- phy_start(tp->dev->phydev);
+ phy_start(tp->phydev);
rtl_lock_work(tp);
napi_enable(&tp->napi);
set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+ rtl_reset_work(tp);
rtl_unlock_work(tp);
-
- rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
static int rtl8169_resume(struct device *device)
@@ -6935,7 +7082,7 @@ static void rtl_remove_one(struct pci_dev *pdev)
netif_napi_del(&tp->napi);
unregister_netdev(dev);
- mdiobus_unregister(tp->mii_bus);
+ mdiobus_unregister(tp->phydev->mdio.bus);
rtl_release_firmware(tp);
@@ -6995,9 +7142,9 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
unsigned int flags;
if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+ rtl_unlock_config_regs(tp);
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ rtl_lock_config_regs(tp);
flags = PCI_IRQ_LEGACY;
} else {
flags = PCI_IRQ_ALL_TYPES;
@@ -7042,7 +7189,6 @@ static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
static int r8169_mdio_register(struct rtl8169_private *tp)
{
struct pci_dev *pdev = tp->pci_dev;
- struct phy_device *phydev;
struct mii_bus *new_bus;
int ret;
@@ -7064,16 +7210,14 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
if (ret)
return ret;
- phydev = mdiobus_get_phy(new_bus, 0);
- if (!phydev) {
+ tp->phydev = mdiobus_get_phy(new_bus, 0);
+ if (!tp->phydev) {
mdiobus_unregister(new_bus);
return -ENODEV;
}
/* PHY will be woken up in rtl_open() */
- phy_suspend(phydev);
-
- tp->mii_bus = new_bus;
+ phy_suspend(tp->phydev);
return 0;
}
@@ -7171,6 +7315,32 @@ static void rtl_disable_clk(void *data)
clk_disable_unprepare(data);
}
+static int rtl_get_ether_clk(struct rtl8169_private *tp)
+{
+ struct device *d = tp_to_dev(tp);
+ struct clk *clk;
+ int rc;
+
+ clk = devm_clk_get(d, "ether_clk");
+ if (IS_ERR(clk)) {
+ rc = PTR_ERR(clk);
+ if (rc == -ENOENT)
+ /* clk-core allows NULL (for suspend / resume) */
+ rc = 0;
+ else if (rc != -EPROBE_DEFER)
+ dev_err(d, "failed to get clk: %d\n", rc);
+ } else {
+ tp->clk = clk;
+ rc = clk_prepare_enable(clk);
+ if (rc)
+ dev_err(d, "failed to enable clk: %d\n", rc);
+ else
+ rc = devm_add_action_or_reset(d, rtl_disable_clk, clk);
+ }
+
+ return rc;
+}
+
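rtl_get_ether_clk() above factors out the "optional clock" idiom: -ENOENT means the board simply has no such clock, and a NULL clk is legal throughout the clk API. A condensed sketch of just that decision (later kernels wrap the same logic in devm_clk_get_optional(), which is an aside, not something this patch uses):

static int example_get_optional_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, "ether_clk");

	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) == -ENOENT) {
			*out = NULL;	/* absent clock: NULL clk is valid */
			return 0;
		}
		return PTR_ERR(clk);	/* real error, incl. -EPROBE_DEFER */
	}

	*out = clk;
	return 0;
}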
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
@@ -7192,30 +7362,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->supports_gmii = cfg->has_gmii;
/* Get the *optional* external "ether_clk" used on some boards */
- tp->clk = devm_clk_get(&pdev->dev, "ether_clk");
- if (IS_ERR(tp->clk)) {
- rc = PTR_ERR(tp->clk);
- if (rc == -ENOENT) {
- /* clk-core allows NULL (for suspend / resume) */
- tp->clk = NULL;
- } else if (rc == -EPROBE_DEFER) {
- return rc;
- } else {
- dev_err(&pdev->dev, "failed to get clk: %d\n", rc);
- return rc;
- }
- } else {
- rc = clk_prepare_enable(tp->clk);
- if (rc) {
- dev_err(&pdev->dev, "failed to enable clk: %d\n", rc);
- return rc;
- }
-
- rc = devm_add_action_or_reset(&pdev->dev, rtl_disable_clk,
- tp->clk);
- if (rc)
- return rc;
- }
+ rc = rtl_get_ether_clk(tp);
+ if (rc)
+ return rc;
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
@@ -7260,13 +7409,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->cp_cmd = RTL_R16(tp, CPlusCmd);
- if (sizeof(dma_addr_t) > 4 && (use_dac == 1 || (use_dac == -1 &&
- tp->mac_version >= RTL_GIGA_MAC_VER_18)) &&
+ if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 &&
!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
-
- /* CPlusCmd Dual Access Cycle is only needed for non-PCIe */
- if (!pci_is_pcie(pdev))
- tp->cp_cmd |= PCIDAC;
dev->features |= NETIF_F_HIGHDMA;
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
@@ -7300,6 +7444,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->saved_wolopts = __rtl8169_get_wol(tp);
mutex_init(&tp->wk.mutex);
+ INIT_WORK(&tp->wk.work, rtl_task);
u64_stats_init(&tp->rx_stats.syncp);
u64_stats_init(&tp->tx_stats.syncp);
@@ -7365,7 +7510,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->irq_mask = RTL_EVENT_NAPI | cfg->irq_mask;
tp->coalesce_info = cfg->coalesce_info;
- tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
+ tp->fw_name = rtl_chip_infos[chipset].fw_name;
tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
&tp->counters_phys_addr,
@@ -7406,7 +7551,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_mdio_unregister:
- mdiobus_unregister(tp->mii_bus);
+ mdiobus_unregister(tp->phydev->mdio.bus);
return rc;
}
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 6213827e3956..62a205eba9f7 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2725,7 +2725,7 @@ rocker_fdb_offload_notify(struct rocker_port *rocker_port,
info.vid = recv_info->vid;
info.offloaded = true;
call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
- rocker_port->dev, &info.info);
+ rocker_port->dev, &info.info, NULL);
}
static void rocker_switchdev_event_work(struct work_struct *work)
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 6473cc68c2d5..bea7895930f6 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1833,10 +1833,10 @@ static void ofdpa_port_fdb_learn_work(struct work_struct *work)
rtnl_lock();
if (learned && removing)
call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
- lw->ofdpa_port->dev, &info.info);
+ lw->ofdpa_port->dev, &info.info, NULL);
else if (learned && !removing)
call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
- lw->ofdpa_port->dev, &info.info);
+ lw->ofdpa_port->dev, &info.info, NULL);
rtnl_unlock();
kfree(work);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 2f2bda68d861..6062e99fa69b 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -6041,10 +6041,11 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
{ NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
{ NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
- /* MUM and SUC firmware share the same partition type */
{ NVRAM_PARTITION_TYPE_MUM_FIRMWARE, 0, 0, "sfc_mumfw" },
{ NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" },
- { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" }
+ { NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS, 0, 0, "sfc_dynamic_cfg_dflt" },
+ { NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS, 0, 0, "sfc_exp_rom_cfg_dflt" },
+ { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" },
};
#define EF10_NVRAM_PARTITION_COUNT ARRAY_SIZE(efx_ef10_nvram_types)
@@ -6074,8 +6075,15 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
if (rc)
return rc;
+ if (protected &&
+ (type != NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS &&
+ type != NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS))
+ /* Hide protected partitions that don't provide defaults. */
+ return -ENODEV;
+
if (protected)
- return -ENODEV; /* hide it */
+ /* Protected partitions are read only. */
+ erase_size = 0;
/* If we've already exposed a partition of this type, hide this
* duplicate. All operations on MTDs are keyed by the type anyway,
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index dfad93fca0a6..295ec1787b9f 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -2074,22 +2074,26 @@ fail:
static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN);
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
+ MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_START_V2_IN_FLAGS,
+ NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT,
+ 1);
BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
NULL, 0, NULL);
+
return rc;
}
static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
loff_t offset, u8 *buffer, size_t length)
{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_V2_LEN);
MCDI_DECLARE_BUF(outbuf,
MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
size_t outlen;
@@ -2098,6 +2102,8 @@ static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_MODE,
+ MC_CMD_NVRAM_READ_IN_V2_DEFAULT);
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
@@ -2147,15 +2153,51 @@ static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
- int rc;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
+ size_t outlen;
+ int rc, rc2;
MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
-
- BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
+ /* Always set this flag. Old firmware ignores it. */
+ MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT,
+ 1);
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ outbuf, sizeof(outbuf), &outlen);
+ if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
+ rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
+ if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS)
+ netif_err(efx, drv, efx->net_dev,
+ "NVRAM update failed verification with code 0x%x\n",
+ rc2);
+ switch (rc2) {
+ case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
+ break;
+ case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED:
+ case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED:
+ case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED:
+ case MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED:
+ case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED:
+ rc = -EIO;
+ break;
+ case MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT:
+ case MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST:
+ rc = -EINVAL;
+ break;
+ case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES:
+ case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS:
+ case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH:
+ rc = -EPERM;
+ break;
+ default:
+ netif_err(efx, drv, efx->net_dev,
+ "Unknown response to NVRAM_UPDATE_FINISH\n");
+ rc = -EIO;
+ }
+ }
+
return rc;
}
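The verify-result handling above groups the firmware's MC_CMD_NVRAM_VERIFY_RC_* codes into three errno classes. A hedged helper-style restatement of that policy (same constants, same grouping; the function itself is illustrative, not part of the patch):

static int example_verify_rc_to_errno(u32 rc2)
{
	switch (rc2) {
	case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
		return 0;
	case MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT:
	case MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST:
		return -EINVAL;		/* malformed update image */
	case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES:
	case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS:
	case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH:
		return -EPERM;		/* signature policy rejected it */
	default:
		return -EIO;		/* integrity-check failures, unknown codes */
	}
}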
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index 4ac30b6e5dab..0d03e0577d85 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -66,6 +66,9 @@ int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
part->mtd.writesize = 1;
+ if (!(part->mtd.flags & MTD_NO_ERASE))
+ part->mtd.flags |= MTD_WRITEABLE;
+
part->mtd.owner = THIS_MODULE;
part->mtd.priv = efx;
part->mtd.name = part->name;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 6209cc1fb305..f194235153f9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -105,6 +105,16 @@ config DWMAC_OXNAS
This selects the Oxford Semiconductor OXNASSoC glue layer support for
the stmmac device driver. This driver is used for OX820.
+config DWMAC_QCOM_ETHQOS
+ tristate "Qualcomm ETHQOS support"
+ default ARCH_QCOM
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ help
+ Support for the Qualcomm ETHQOS core.
+
+ This selects the Qualcomm ETHQOS glue layer support for the
+ stmmac device driver.
+
config DWMAC_ROCKCHIP
tristate "Rockchip dwmac support"
default ARCH_ROCKCHIP
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index bf09701d2623..c529c21e9bdd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o
obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
obj-$(CONFIG_DWMAC_OXNAS) += dwmac-oxnas.o
+obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
new file mode 100644
index 000000000000..7ec895407d23
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-19, Linaro Limited
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+#define RGMII_IO_MACRO_CONFIG 0x0
+#define SDCC_HC_REG_DLL_CONFIG 0x4
+#define SDCC_HC_REG_DDR_CONFIG 0xC
+#define SDCC_HC_REG_DLL_CONFIG2 0x10
+#define SDC4_STATUS 0x14
+#define SDCC_USR_CTL 0x18
+#define RGMII_IO_MACRO_CONFIG2 0x1C
+#define RGMII_IO_MACRO_DEBUG1 0x20
+#define EMAC_SYSTEM_LOW_POWER_DEBUG 0x28
+
+/* RGMII_IO_MACRO_CONFIG fields */
+#define RGMII_CONFIG_FUNC_CLK_EN BIT(30)
+#define RGMII_CONFIG_POS_NEG_DATA_SEL BIT(23)
+#define RGMII_CONFIG_GPIO_CFG_RX_INT GENMASK(21, 20)
+#define RGMII_CONFIG_GPIO_CFG_TX_INT GENMASK(19, 17)
+#define RGMII_CONFIG_MAX_SPD_PRG_9 GENMASK(16, 8)
+#define RGMII_CONFIG_MAX_SPD_PRG_2 GENMASK(7, 6)
+#define RGMII_CONFIG_INTF_SEL GENMASK(5, 4)
+#define RGMII_CONFIG_BYPASS_TX_ID_EN BIT(3)
+#define RGMII_CONFIG_LOOPBACK_EN BIT(2)
+#define RGMII_CONFIG_PROG_SWAP BIT(1)
+#define RGMII_CONFIG_DDR_MODE BIT(0)
+
+/* SDCC_HC_REG_DLL_CONFIG fields */
+#define SDCC_DLL_CONFIG_DLL_RST BIT(30)
+#define SDCC_DLL_CONFIG_PDN BIT(29)
+#define SDCC_DLL_CONFIG_MCLK_FREQ GENMASK(26, 24)
+#define SDCC_DLL_CONFIG_CDR_SELEXT GENMASK(23, 20)
+#define SDCC_DLL_CONFIG_CDR_EXT_EN BIT(19)
+#define SDCC_DLL_CONFIG_CK_OUT_EN BIT(18)
+#define SDCC_DLL_CONFIG_CDR_EN BIT(17)
+#define SDCC_DLL_CONFIG_DLL_EN BIT(16)
+#define SDCC_DLL_MCLK_GATING_EN BIT(5)
+#define SDCC_DLL_CDR_FINE_PHASE GENMASK(3, 2)
+
+/* SDCC_HC_REG_DDR_CONFIG fields */
+#define SDCC_DDR_CONFIG_PRG_DLY_EN BIT(31)
+#define SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY GENMASK(26, 21)
+#define SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE GENMASK(29, 27)
+#define SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN BIT(30)
+#define SDCC_DDR_CONFIG_PRG_RCLK_DLY GENMASK(8, 0)
+
+/* SDCC_HC_REG_DLL_CONFIG2 fields */
+#define SDCC_DLL_CONFIG2_DLL_CLOCK_DIS BIT(21)
+#define SDCC_DLL_CONFIG2_MCLK_FREQ_CALC GENMASK(17, 10)
+#define SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SEL GENMASK(3, 2)
+#define SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW BIT(1)
+#define SDCC_DLL_CONFIG2_DDR_CAL_EN BIT(0)
+
+/* SDC4_STATUS bits */
+#define SDC4_STATUS_DLL_LOCK BIT(7)
+
+/* RGMII_IO_MACRO_CONFIG2 fields */
+#define RGMII_CONFIG2_RSVD_CONFIG15 GENMASK(31, 17)
+#define RGMII_CONFIG2_RGMII_CLK_SEL_CFG BIT(16)
+#define RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN BIT(13)
+#define RGMII_CONFIG2_CLK_DIVIDE_SEL BIT(12)
+#define RGMII_CONFIG2_RX_PROG_SWAP BIT(7)
+#define RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL BIT(6)
+#define RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN BIT(5)
+
+struct ethqos_emac_por {
+ unsigned int offset;
+ unsigned int value;
+};
+
+struct qcom_ethqos {
+ struct platform_device *pdev;
+ void __iomem *rgmii_base;
+
+ unsigned int rgmii_clk_rate;
+ struct clk *rgmii_clk;
+ unsigned int speed;
+
+ const struct ethqos_emac_por *por;
+ unsigned int num_por;
+};
+
+static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
+{
+ return readl(ethqos->rgmii_base + offset);
+}
+
+static void rgmii_writel(struct qcom_ethqos *ethqos,
+ int value, unsigned int offset)
+{
+ writel(value, ethqos->rgmii_base + offset);
+}
+
+static void rgmii_updatel(struct qcom_ethqos *ethqos,
+ int mask, int val, unsigned int offset)
+{
+ unsigned int temp;
+
+ temp = rgmii_readl(ethqos, offset);
+ temp = (temp & ~(mask)) | val;
+ rgmii_writel(ethqos, temp, offset);
+}
+
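rgmii_updatel() is the driver's read-modify-write primitive: bits under mask are cleared, then val is OR-ed in, so passing val = 0 clears a field and val = mask sets a single-bit field. Two calls lifted from later in this file as a usage reminder:

static void example_rgmii_field_ops(struct qcom_ethqos *ethqos)
{
	/* Set a single-bit field: pass the same bit as mask and val. */
	rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
		      RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);

	/* Clear a field: same mask, val = 0. */
	rgmii_updatel(ethqos, RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN,
		      0, RGMII_IO_MACRO_CONFIG2);
}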
+static void rgmii_dump(struct qcom_ethqos *ethqos)
+{
+ dev_dbg(&ethqos->pdev->dev, "RGMII register dump\n");
+ dev_dbg(&ethqos->pdev->dev, "RGMII_IO_MACRO_CONFIG: %x\n",
+ rgmii_readl(ethqos, RGMII_IO_MACRO_CONFIG));
+ dev_dbg(&ethqos->pdev->dev, "SDCC_HC_REG_DLL_CONFIG: %x\n",
+ rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG));
+ dev_dbg(&ethqos->pdev->dev, "SDCC_HC_REG_DDR_CONFIG: %x\n",
+ rgmii_readl(ethqos, SDCC_HC_REG_DDR_CONFIG));
+ dev_dbg(&ethqos->pdev->dev, "SDCC_HC_REG_DLL_CONFIG2: %x\n",
+ rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG2));
+ dev_dbg(&ethqos->pdev->dev, "SDC4_STATUS: %x\n",
+ rgmii_readl(ethqos, SDC4_STATUS));
+ dev_dbg(&ethqos->pdev->dev, "SDCC_USR_CTL: %x\n",
+ rgmii_readl(ethqos, SDCC_USR_CTL));
+ dev_dbg(&ethqos->pdev->dev, "RGMII_IO_MACRO_CONFIG2: %x\n",
+ rgmii_readl(ethqos, RGMII_IO_MACRO_CONFIG2));
+ dev_dbg(&ethqos->pdev->dev, "RGMII_IO_MACRO_DEBUG1: %x\n",
+ rgmii_readl(ethqos, RGMII_IO_MACRO_DEBUG1));
+ dev_dbg(&ethqos->pdev->dev, "EMAC_SYSTEM_LOW_POWER_DEBUG: %x\n",
+ rgmii_readl(ethqos, EMAC_SYSTEM_LOW_POWER_DEBUG));
+}
+
+/* Clock rates */
+#define RGMII_1000_NOM_CLK_FREQ (250 * 1000 * 1000UL)
+#define RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ (50 * 1000 * 1000UL)
+#define RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ (5 * 1000 * 1000UL)
+
+static void
+ethqos_update_rgmii_clk(struct qcom_ethqos *ethqos, unsigned int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ ethqos->rgmii_clk_rate = RGMII_1000_NOM_CLK_FREQ;
+ break;
+
+ case SPEED_100:
+ ethqos->rgmii_clk_rate = RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ;
+ break;
+
+ case SPEED_10:
+ ethqos->rgmii_clk_rate = RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ;
+ break;
+ }
+
+ clk_set_rate(ethqos->rgmii_clk, ethqos->rgmii_clk_rate);
+}
+
+static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos)
+{
+ rgmii_updatel(ethqos, RGMII_CONFIG_FUNC_CLK_EN,
+ RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG);
+}
+
+static const struct ethqos_emac_por emac_v2_3_0_por[] = {
+ { .offset = RGMII_IO_MACRO_CONFIG, .value = 0x00C01343 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG, .value = 0x2004642C },
+ { .offset = SDCC_HC_REG_DDR_CONFIG, .value = 0x00000000 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG2, .value = 0x00200000 },
+ { .offset = SDCC_USR_CTL, .value = 0x00010800 },
+ { .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 },
+};
+
+static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
+{
+ unsigned int val;
+ int retry = 1000;
+
+ /* Set CDR_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EN,
+ SDCC_DLL_CONFIG_CDR_EN, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Set CDR_EXT_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EXT_EN,
+ SDCC_DLL_CONFIG_CDR_EXT_EN, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Clear CK_OUT_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
+ 0, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Set DLL_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
+ SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
+
+ rgmii_updatel(ethqos, SDCC_DLL_MCLK_GATING_EN,
+ 0, SDCC_HC_REG_DLL_CONFIG);
+
+ rgmii_updatel(ethqos, SDCC_DLL_CDR_FINE_PHASE,
+ 0, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Wait for CK_OUT_EN clear */
+ do {
+ val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
+ val &= SDCC_DLL_CONFIG_CK_OUT_EN;
+ if (!val)
+ break;
+ mdelay(1);
+ retry--;
+ } while (retry > 0);
+ if (!retry)
+ dev_err(&ethqos->pdev->dev, "Clear CK_OUT_EN timed out\n");
+
+ /* Set CK_OUT_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
+ SDCC_DLL_CONFIG_CK_OUT_EN, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Wait for CK_OUT_EN set */
+ retry = 1000;
+ do {
+ val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
+ val &= SDCC_DLL_CONFIG_CK_OUT_EN;
+ if (val)
+ break;
+ mdelay(1);
+ retry--;
+ } while (retry > 0);
+ if (!retry)
+ dev_err(&ethqos->pdev->dev, "Set CK_OUT_EN timed out\n");
+
+ /* Set DDR_CAL_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_CAL_EN,
+ SDCC_DLL_CONFIG2_DDR_CAL_EN, SDCC_HC_REG_DLL_CONFIG2);
+
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS,
+ 0, SDCC_HC_REG_DLL_CONFIG2);
+
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_MCLK_FREQ_CALC,
+ 0x1A << 10, SDCC_HC_REG_DLL_CONFIG2);
+
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SEL,
+ BIT(2), SDCC_HC_REG_DLL_CONFIG2);
+
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
+ SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
+ SDCC_HC_REG_DLL_CONFIG2);
+
+ return 0;
+}
+
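The two open-coded wait loops in ethqos_dll_configure() could also be written with readx_poll_timeout() from <linux/iopoll.h>. A hedged sketch of the CK_OUT_EN-clear wait under that API (it sleeps, so it is only valid where the caller may sleep, which is an assumption about the calling context):

#include <linux/iopoll.h>

static u32 example_read_dll_config(struct qcom_ethqos *ethqos)
{
	return rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
}

/* Poll every ~1 ms for up to 1 s, mirroring the 1000 x mdelay(1) above. */
static int example_wait_ck_out_clear(struct qcom_ethqos *ethqos)
{
	u32 val;

	return readx_poll_timeout(example_read_dll_config, ethqos, val,
				  !(val & SDCC_DLL_CONFIG_CK_OUT_EN),
				  1000, 1000000);
}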
+static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
+{
+ /* Disable loopback mode */
+ rgmii_updatel(ethqos, RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN,
+ 0, RGMII_IO_MACRO_CONFIG2);
+
+ /* Select RGMII, write 0 to interface select */
+ rgmii_updatel(ethqos, RGMII_CONFIG_INTF_SEL,
+ 0, RGMII_IO_MACRO_CONFIG);
+
+ switch (ethqos->speed) {
+ case SPEED_1000:
+ rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
+ RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
+ 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ RGMII_CONFIG_POS_NEG_DATA_SEL,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
+ RGMII_CONFIG_PROG_SWAP, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
+ RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
+ RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG2);
+
+ /* Set PRG_RCLK_DLY to 57 for 1.8 ns delay */
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
+ 57, SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
+ SDCC_DDR_CONFIG_PRG_DLY_EN,
+ SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ break;
+
+ case SPEED_100:
+ rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
+ RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
+ RGMII_CONFIG_BYPASS_TX_ID_EN,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
+ 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
+ RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
+ RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_2,
+ BIT(6), RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ /* Write 0x5 to PRG_RCLK_DLY_CODE */
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
+ (BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ break;
+
+ case SPEED_10:
+ rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
+ RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
+ RGMII_CONFIG_BYPASS_TX_ID_EN,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
+ 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_9,
+ BIT(12) | GENMASK(9, 8),
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ /* Write 0x5 to PRG_RCLK_DLY_CODE */
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
+ (BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ break;
+ default:
+ dev_err(&ethqos->pdev->dev,
+ "Invalid speed %d\n", ethqos->speed);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ethqos_configure(struct qcom_ethqos *ethqos)
+{
+ volatile unsigned int dll_lock;
+ unsigned int i, retry = 1000;
+
+ /* Reset to POR values and enable clk */
+ for (i = 0; i < ethqos->num_por; i++)
+ rgmii_writel(ethqos, ethqos->por[i].value,
+ ethqos->por[i].offset);
+ ethqos_set_func_clk_en(ethqos);
+
+ /* Initialize the DLL first */
+
+ /* Set DLL_RST */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST,
+ SDCC_DLL_CONFIG_DLL_RST, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Set PDN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN,
+ SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Clear DLL_RST */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST, 0,
+ SDCC_HC_REG_DLL_CONFIG);
+
+ /* Clear PDN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN, 0,
+ SDCC_HC_REG_DLL_CONFIG);
+
+ if (ethqos->speed != SPEED_100 && ethqos->speed != SPEED_10) {
+ /* Set DLL_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
+ SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Set CK_OUT_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
+ SDCC_DLL_CONFIG_CK_OUT_EN,
+ SDCC_HC_REG_DLL_CONFIG);
+
+ /* Set USR_CTL bit 26 with mask of 3 bits */
+ rgmii_updatel(ethqos, GENMASK(26, 24), BIT(26), SDCC_USR_CTL);
+
+ /* wait for DLL LOCK */
+ do {
+ mdelay(1);
+ dll_lock = rgmii_readl(ethqos, SDC4_STATUS);
+ if (dll_lock & SDC4_STATUS_DLL_LOCK)
+ break;
+ retry--;
+ } while (retry > 0);
+ if (!retry)
+ dev_err(&ethqos->pdev->dev,
+ "Timeout while waiting for DLL lock\n");
+ }
+
+ if (ethqos->speed == SPEED_1000)
+ ethqos_dll_configure(ethqos);
+
+ ethqos_rgmii_macro_init(ethqos);
+
+ return 0;
+}
+
+static void ethqos_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct qcom_ethqos *ethqos = priv;
+
+ ethqos->speed = speed;
+ ethqos_update_rgmii_clk(ethqos, speed);
+ ethqos_configure(ethqos);
+}
+
+static int qcom_ethqos_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct qcom_ethqos *ethqos;
+ struct resource *res;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat)) {
+ dev_err(&pdev->dev, "dt configuration failed\n");
+ return PTR_ERR(plat_dat);
+ }
+
+ ethqos = devm_kzalloc(&pdev->dev, sizeof(*ethqos), GFP_KERNEL);
+ if (!ethqos) {
+ ret = -ENOMEM;
+ goto err_mem;
+ }
+
+ ethqos->pdev = pdev;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rgmii");
+ ethqos->rgmii_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ethqos->rgmii_base)) {
+ dev_err(&pdev->dev, "Can't get rgmii base\n");
+ ret = PTR_ERR(ethqos->rgmii_base);
+ goto err_mem;
+ }
+
+ ethqos->por = of_device_get_match_data(&pdev->dev);
+
+ ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii");
+ if (IS_ERR(ethqos->rgmii_clk)) {
+ ret = PTR_ERR(ethqos->rgmii_clk);
+ goto err_mem;
+ }
+
+ ret = clk_prepare_enable(ethqos->rgmii_clk);
+ if (ret)
+ goto err_mem;
+
+ ethqos->speed = SPEED_1000;
+ ethqos_update_rgmii_clk(ethqos, SPEED_1000);
+ ethqos_set_func_clk_en(ethqos);
+
+ plat_dat->bsp_priv = ethqos;
+ plat_dat->fix_mac_speed = ethqos_fix_mac_speed;
+ plat_dat->has_gmac4 = 1;
+ plat_dat->pmt = 1;
+ plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_clk;
+
+ rgmii_dump(ethqos);
+
+ return ret;
+
+err_clk:
+ clk_disable_unprepare(ethqos->rgmii_clk);
+
+err_mem:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int qcom_ethqos_remove(struct platform_device *pdev)
+{
+ struct qcom_ethqos *ethqos;
+ int ret;
+
+ ethqos = get_stmmac_bsp_priv(&pdev->dev);
+ if (!ethqos)
+ return -ENODEV;
+
+ ret = stmmac_pltfr_remove(pdev);
+ clk_disable_unprepare(ethqos->rgmii_clk);
+
+ return ret;
+}
+
+static const struct of_device_id qcom_ethqos_match[] = {
+ { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_por},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_ethqos_match);
+
+static struct platform_driver qcom_ethqos_driver = {
+ .probe = qcom_ethqos_probe,
+ .remove = qcom_ethqos_remove,
+ .driver = {
+ .name = "qcom-ethqos",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = of_match_ptr(qcom_ethqos_match),
+ },
+};
+module_platform_driver(qcom_ethqos_driver);
+
+MODULE_DESCRIPTION("Qualcomm ETHQOS driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 63e1064b27a2..f9b42b0c20f4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -28,6 +28,7 @@
#include <linux/pci.h>
#include "common.h"
#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
#include <linux/reset.h>
struct stmmac_resources {
@@ -174,6 +175,7 @@ struct stmmac_priv {
unsigned int mode;
unsigned int chain_mode;
int extend_desc;
+ struct hwtstamp_config tstamp_config;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_ops;
unsigned int default_addend;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 5afba69981cf..517555bcfa44 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -534,7 +534,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
}
/**
- * stmmac_hwtstamp_ioctl - control hardware timestamping.
+ * stmmac_hwtstamp_set - control hardware timestamping.
* @dev: device pointer.
* @ifr: An IOCTL specific structure, that can contain a pointer to
* a proprietary structure used to pass information to the driver.
@@ -544,7 +544,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
* Return Value:
* 0 on success and an appropriate -ve integer on failure.
*/
-static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct hwtstamp_config config;
@@ -573,7 +573,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
}
if (copy_from_user(&config, ifr->ifr_data,
- sizeof(struct hwtstamp_config)))
+ sizeof(config)))
return -EFAULT;
netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
@@ -765,8 +765,31 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
(u32)now.tv_sec, now.tv_nsec);
}
+ memcpy(&priv->tstamp_config, &config, sizeof(config));
+
return copy_to_user(ifr->ifr_data, &config,
- sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
+ sizeof(config)) ? -EFAULT : 0;
+}
+
+/**
+ * stmmac_hwtstamp_get - read hardware timestamping.
+ * @dev: device pointer.
+ * @ifr: An IOCTL specific structure, that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * Description:
+ * This function obtains the current hardware timestamping settings
+ * as requested.
+ */
+static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config *config = &priv->tstamp_config;
+
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, config,
+ sizeof(*config)) ? -EFAULT : 0;
}
/**
@@ -3767,7 +3790,10 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
ret = phy_mii_ioctl(dev->phydev, rq, cmd);
break;
case SIOCSHWTSTAMP:
- ret = stmmac_hwtstamp_ioctl(dev, rq);
+ ret = stmmac_hwtstamp_set(dev, rq);
+ break;
+ case SIOCGHWTSTAMP:
+ ret = stmmac_hwtstamp_get(dev, rq);
break;
default:
break;
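
With SIOCGHWTSTAMP wired into stmmac_ioctl(), userspace can read back
the settings saved in priv->tstamp_config without touching the
hardware. A minimal sketch using only uapi headers (the interface name
"eth0" is a placeholder):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct hwtstamp_config cfg = { 0 };
		struct ifreq ifr = { 0 };
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		/* unlike SIOCSHWTSTAMP, this only reads the saved config */
		if (ioctl(fd, SIOCGHWTSTAMP, &ifr) < 0)
			perror("SIOCGHWTSTAMP");
		else
			printf("flags=%d tx_type=%d rx_filter=%d\n",
			       cfg.flags, cfg.tx_type, cfg.rx_filter);
		close(fd);
		return 0;
	}

Run against a stmmac device, this should report whatever the last
SIOCSHWTSTAMP stored in tstamp_config, or all zeroes before any set.
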
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index b9221fc1674d..3e7631160384 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2760,7 +2760,7 @@ static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
void __iomem *p = pci_map_rom(pdev, &size);
if (p) {
- int found;
+ int found;
found = readb(p) == 0x55 &&
readb(p + 1) == 0xaa &&
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 396e1cd10667..fec275e2208d 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -78,7 +78,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
case PHY_INTERFACE_MODE_MII:
mode = AM33XX_GMII_SEL_MODE_MII;
break;
- };
+ }
mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
mask |= BIT(slave + 4);
@@ -133,7 +133,7 @@ static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv,
case PHY_INTERFACE_MODE_MII:
mode = AM33XX_GMII_SEL_MODE_MII;
break;
- };
+ }
switch (slave) {
case 0:
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 6d067176320f..cf22a79af66b 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -963,7 +963,8 @@ static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags)
+ u16 flags,
+ struct netlink_ext_ack *extack)
{
struct macvlan_dev *vlan = netdev_priv(dev);
int err = -EINVAL;
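
The new extack argument threaded through ndo_fdb_add lets an
implementation attach a human-readable reason to a netlink failure. A
hedged sketch of the pattern (the function body and message are
illustrative, not taken from macvlan):

	/* Illustrative only: an ndo_fdb_add body using the new extack
	 * parameter to report a readable error back over netlink.
	 */
	static int sketch_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
				  struct net_device *dev,
				  const unsigned char *addr, u16 vid,
				  u16 flags, struct netlink_ext_ack *extack)
	{
		if (is_multicast_ether_addr(addr)) {
			NL_SET_ERR_MSG(extack,
				       "FDB entry must be a unicast address");
			return -EINVAL;
		}
		return 0;
	}
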
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index 9d0504f3e3b2..65b4b0960b1e 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for AMD am79c PHYs
*
* Author: Heiko Schocher <hs@denx.de>
*
* Copyright (c) 2011 DENX Software Engineering GmbH
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c
index beb3309bb0f0..0578fe75d3b6 100644
--- a/drivers/net/phy/aquantia.c
+++ b/drivers/net/phy/aquantia.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Aquantia PHY
*
* Author: Shaohui Xie <Shaohui.Xie@freescale.com>
*
* Copyright 2015 Freescale Semiconductor, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index f9432d053a22..90dc62c15fc5 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/at803x.c
*
* Driver for Atheros 803x PHY
*
* Author: Matus Ujhelyi <ujhelyi.m@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/phy.h>
@@ -39,9 +35,6 @@
#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
#define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B
#define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A
-#define AT803X_MMD_ACCESS_CONTROL 0x0D
-#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
-#define AT803X_FUNC_DATA 0x4003
#define AT803X_REG_CHIP_CONFIG 0x1f
#define AT803X_BT_BX_REG_SEL 0x8000
@@ -110,16 +103,16 @@ static int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
return phy_write(phydev, AT803X_DEBUG_DATA, val);
}
-static inline int at803x_enable_rx_delay(struct phy_device *phydev)
+static inline int at803x_disable_rx_delay(struct phy_device *phydev)
{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, 0,
- AT803X_DEBUG_RX_CLK_DLY_EN);
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0,
+ AT803X_DEBUG_RX_CLK_DLY_EN, 0);
}
-static inline int at803x_enable_tx_delay(struct phy_device *phydev)
+static inline int at803x_disable_tx_delay(struct phy_device *phydev)
{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, 0,
- AT803X_DEBUG_TX_CLK_DLY_EN);
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5,
+ AT803X_DEBUG_TX_CLK_DLY_EN, 0);
}
/* save relevant PHY registers to private copy */
@@ -168,16 +161,9 @@ static int at803x_set_wol(struct phy_device *phydev,
if (!is_valid_ether_addr(mac))
return -EINVAL;
- for (i = 0; i < 3; i++) {
- phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
- AT803X_DEVICE_ADDR);
- phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA,
- offsets[i]);
- phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
- AT803X_FUNC_DATA);
- phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA,
- mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
- }
+ for (i = 0; i < 3; i++)
+ phy_write_mmd(phydev, AT803X_DEVICE_ADDR, offsets[i],
+ mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
value = phy_read(phydev, AT803X_INTR_ENABLE);
value |= AT803X_INTR_ENABLE_WOL;
@@ -256,15 +242,17 @@ static int at803x_config_init(struct phy_device *phydev)
return ret;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
- ret = at803x_enable_rx_delay(phydev);
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII) {
+ ret = at803x_disable_rx_delay(phydev);
if (ret < 0)
return ret;
}
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
- ret = at803x_enable_tx_delay(phydev);
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII) {
+ ret = at803x_disable_tx_delay(phydev);
if (ret < 0)
return ret;
}
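
The conversion above drops at803x's private MMD access-control defines
in favour of phy_write_mmd(). For a Clause 22 PHY such as this one,
the helper performs the standard indirect sequence through registers
0x0d/0x0e, roughly as sketched below (illustrative; the real helper
also holds the MDIO bus lock across the four accesses):

	/* Sketch of the Clause 22 indirect MMD write that phy_write_mmd()
	 * encapsulates, equivalent to the open-coded loop removed from
	 * at803x_set_wol(). 0x0d/0x0e are the IEEE 802.3 MMD access
	 * control/data registers; 0x4000 selects "data, no post-increment".
	 */
	static int sketch_c22_mmd_write(struct phy_device *phydev,
					int devad, u32 regnum, u16 val)
	{
		phy_write(phydev, 0x0d, devad);		/* select MMD device */
		phy_write(phydev, 0x0e, regnum);	/* select register */
		phy_write(phydev, 0x0d, 0x4000 | devad);/* switch 0x0e to data */
		return phy_write(phydev, 0x0e, val);	/* write the payload */
	}
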
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index e757b09f1889..ab8e12922bf9 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/* Broadcom Cygnus SoC internal transceivers support. */
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index e10e7b54ec4b..a75642051b8b 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "bcm-phy-lib.h"
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index 81cceaa412fe..17faaefcfd60 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _LINUX_BCM_PHY_LIB_H
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index a88dd14a25c0..44e6cff419a0 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Broadcom 63xx SOCs integrated PHYs
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "bcm-phy-lib.h"
#include <linux/module.h>
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 712224cc442d..b8415f8fae14 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Broadcom BCM7xxx internal transceivers support.
*
* Copyright (C) 2014-2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index a271239748f2..f0c0eefe2202 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 2011 - 2012 Cavium, Inc.
*/
@@ -221,4 +218,4 @@ static struct phy_driver bcm87xx_driver[] = {
module_phy_driver(bcm87xx_driver);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index aa73c5cc5f86..9605d4fe540b 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/broadcom.c
*
@@ -7,11 +8,6 @@
* Copyright (c) 2006 Maciej W. Rozycki
*
* Inspired by code written by Amy Fong.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "bcm-phy-lib.h"
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index fea61c81bda9..108ed24f8489 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/cicada.c
*
@@ -6,12 +7,6 @@
* Author: Andy Fleming
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
index 1a4d04afb7f0..c291dc014769 100644
--- a/drivers/net/phy/cortina.c
+++ b/drivers/net/phy/cortina.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2017 NXP
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* CORTINA is a registered trademark of Cortina Systems, Inc.
*
*/
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 97162008f42b..bf39baa7f2c8 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/davicom.c
*
@@ -6,12 +7,6 @@
* Author: Andy Fleming
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 18b41bc345ab..25ef483bcc24 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for the National Semiconductor DP83640 PHYTER
*
* Copyright (C) 2010 OMICRON electronics GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 24c7f149f3e6..bbd8c22067f3 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Texas Instruments DP83822 PHY
*
* Copyright (C) 2017 Texas Instruments Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/ethtool.h>
@@ -338,4 +330,4 @@ MODULE_DEVICE_TABLE(mdio, dp83822_tbl);
MODULE_DESCRIPTION("Texas Instruments DP83822 PHY driver");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index a6b55909d1dc..f55dc907c2f3 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Texas Instruments DP83848 PHY
*
* Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
@@ -133,4 +125,4 @@ module_phy_driver(dp83848_driver);
MODULE_DESCRIPTION("Texas Instruments DP83848 PHY driver");
MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index da6a67d47ce9..8a8d9f606b3e 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Texas Instruments DP83867 PHY
*
* Copyright (C) 2015 Texas Instruments Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/ethtool.h>
@@ -357,4 +349,4 @@ MODULE_DEVICE_TABLE(mdio, dp83867_tbl);
MODULE_DESCRIPTION("Texas Instruments DP83867 PHY driver");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index 565e49e7f76f..2aa367c04a8e 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/et1011c.c
*
@@ -6,12 +7,6 @@
* Author: Chaithrika U S
*
* Copyright (c) 2008 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 72d43c88e6ff..47a8cb574c45 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Fixed MDIO bus (MDIO bus emulation with fixed PHYs)
*
@@ -5,11 +6,6 @@
* Anton Vorontsov <avorontsov@ru.mvista.com>
*
* Copyright (c) 2006-2007 MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 7d5938b87660..ebef8354bc81 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -1,13 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for ICPlus PHYs
*
* Copyright (c) 2007 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index fc0f5024a29e..02d9713318b6 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2012 Daniel Schwierzeck <daniel.schwierzeck@googlemail.com>
* Copyright (C) 2016 Hauke Mehrtens <hauke@hauke-m.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/mdio.h>
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index c8bb29ae1a2a..a93d673baf35 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/lxt.c
*
@@ -6,12 +7,6 @@
* Author: Andy Fleming
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 2e12f982534f..90f44ba8aca7 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/marvell.c
*
@@ -8,12 +9,6 @@
* Copyright (c) 2004 Freescale Semiconductor, Inc.
*
* Copyright (c) 2013 Michael Stapelberg <michael@stapelberg.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 82ab6ed3b74e..38cfc923a88a 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Marvell 10G 88x3310 PHY driver
*
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
index 46fe1ae919a3..7d0f388d8db8 100644
--- a/drivers/net/phy/mdio-bcm-iproc.c
+++ b/drivers/net/phy/mdio-bcm-iproc.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/delay.h>
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index df75efa96a7d..8295bc7c8c20 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Broadcom UniMAC MDIO bus controller driver
*
* Copyright (C) 2014-2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index 15352f987bdf..5136275c8e73 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Bitbanged MDIO support.
*
@@ -11,10 +12,6 @@
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
@@ -232,4 +229,4 @@ void free_mdio_bitbang(struct mii_bus *bus)
}
EXPORT_SYMBOL(free_mdio_bitbang);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c
index 863496fa5d13..d9b54c67ef9f 100644
--- a/drivers/net/phy/mdio-boardinfo.c
+++ b/drivers/net/phy/mdio-boardinfo.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* mdio-boardinfo - Collect pre-declarations for MDIO devices
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/phy/mdio-cavium.c b/drivers/net/phy/mdio-cavium.c
index 6df2fa755bb4..1afd6fc1a351 100644
--- a/drivers/net/phy/mdio-cavium.c
+++ b/drivers/net/phy/mdio-cavium.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 2009-2016 Cavium, Inc.
*/
@@ -150,4 +147,4 @@ EXPORT_SYMBOL(cavium_mdiobus_write);
MODULE_DESCRIPTION("Common code for OCTEON and Thunder MDIO bus drivers");
MODULE_AUTHOR("David Daney");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio-cavium.h b/drivers/net/phy/mdio-cavium.h
index 4bccd45d24e2..ed5f9bb5448d 100644
--- a/drivers/net/phy/mdio-cavium.h
+++ b/drivers/net/phy/mdio-cavium.h
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 2009-2016 Cavium, Inc.
*/
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index ea9a0e339778..1b00235d7dc5 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPIO based MDIO bitbang driver.
* Supports OpenFirmware.
@@ -14,10 +15,6 @@
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
@@ -216,5 +213,5 @@ module_platform_driver(mdio_gpio_driver);
MODULE_ALIAS("platform:mdio-gpio");
MODULE_AUTHOR("Laurent Pinchart, Paulius Zaleckas");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic driver for MDIO bus emulation using GPIO");
diff --git a/drivers/net/phy/mdio-i2c.c b/drivers/net/phy/mdio-i2c.c
index 6d24fd13ca86..0dce67672548 100644
--- a/drivers/net/phy/mdio-i2c.c
+++ b/drivers/net/phy/mdio-i2c.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MDIO I2C bridge
*
* Copyright (C) 2015-2016 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Network PHYs can appear on I2C buses when they are part of SFP module.
* This driver exposes these PHYs to the networking PHY code, allowing
* our PHY drivers access to these PHYs, and so allowing configuration
diff --git a/drivers/net/phy/mdio-i2c.h b/drivers/net/phy/mdio-i2c.h
index 889ab57d7f3e..751dab281f57 100644
--- a/drivers/net/phy/mdio-i2c.h
+++ b/drivers/net/phy/mdio-i2c.h
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MDIO I2C bridge
*
* Copyright (C) 2015 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef MDIO_I2C_H
#define MDIO_I2C_H
diff --git a/drivers/net/phy/mdio-moxart.c b/drivers/net/phy/mdio-moxart.c
index 5bb56d126693..af3910fe8ec7 100644
--- a/drivers/net/phy/mdio-moxart.c
+++ b/drivers/net/phy/mdio-moxart.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/* MOXA ART Ethernet (RTL8201CP) MDIO interface driver
*
* Copyright (C) 2013 Jonas Jensen <jonas.jensen@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/delay.h>
@@ -190,4 +187,4 @@ module_platform_driver(moxart_mdio_driver);
MODULE_DESCRIPTION("MOXA ART MDIO interface driver");
MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
index 696bdf1e4576..88d409e48c1f 100644
--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
+++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016 Broadcom
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation (the "GPL").
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 (GPLv2) for more details.
- *
- * You should have received a copy of the GNU General Public License
- * version 2 (GPLv2) along with this source code.
*/
#include <linux/clk.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index fe34576262bd..6c8960df43b0 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 2011, 2012 Cavium, Inc.
*/
@@ -103,4 +100,4 @@ module_platform_driver(mdio_mux_gpio_driver);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR("David Daney");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
index 70f6115530af..d1a8780e24d8 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Simple memory-mapped device MDIO MUX driver
*
* Author: Timur Tabi <timur@freescale.com>
*
* Copyright 2012 Freescale Semiconductor, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/platform_device.h>
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 0a86f1e4c02f..6a1d3540210b 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 2011, 2012 Cavium, Inc.
*/
@@ -210,4 +207,4 @@ EXPORT_SYMBOL_GPL(mdio_mux_uninit);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index ab6914f8bd50..8327382aa568 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 2009-2015 Cavium, Inc.
*/
@@ -122,4 +119,4 @@ module_platform_driver(octeon_mdiobus_driver);
MODULE_DESCRIPTION("Cavium OCTEON MDIO bus driver");
MODULE_AUTHOR("David Daney");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 6425ce04d3f9..20ffd8fb79ce 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Allwinner EMAC MDIO interface driver
*
@@ -6,10 +7,6 @@
*
* Based on the Linux driver provided by Allwinner:
* Copyright (C) 1997 Sten Wang
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/delay.h>
@@ -179,4 +176,4 @@ module_platform_driver(sun4i_mdio_driver);
MODULE_DESCRIPTION("Allwinner EMAC MDIO interface driver");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio-thunder.c b/drivers/net/phy/mdio-thunder.c
index 1546f6398831..b6128ae7f14f 100644
--- a/drivers/net/phy/mdio-thunder.c
+++ b/drivers/net/phy/mdio-thunder.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 2009-2016 Cavium, Inc.
*/
@@ -151,4 +148,4 @@ static struct pci_driver thunder_mdiobus_driver = {
module_pci_driver(thunder_mdiobus_driver);
MODULE_DESCRIPTION("Cavium ThunderX MDIO bus driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index 07c6048200c6..717cc2a056e8 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Applied Micro X-Gene SoC MDIO Driver
*
* Copyright (c) 2016, Applied Micro Circuits Corporation
* Author: Iyappan Subramanian <isubramanian@apm.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/acpi.h>
diff --git a/drivers/net/phy/mdio-xgene.h b/drivers/net/phy/mdio-xgene.h
index 3c85f3e30baa..b1f5ccb4ad9c 100644
--- a/drivers/net/phy/mdio-xgene.h
+++ b/drivers/net/phy/mdio-xgene.h
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Applied Micro X-Gene SoC MDIO Driver
*
* Copyright (c) 2016, Applied Micro Circuits Corporation
* Author: Iyappan Subramanian <isubramanian@apm.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MDIO_XGENE_H__
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 66b9cfe692fc..3d313585c9c1 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -1,14 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/* MDIO Bus interface
*
* Author: Andy Fleming
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -39,8 +34,6 @@
#include <linux/io.h>
#include <linux/uaccess.h>
-#include <asm/irq.h>
-
#define CREATE_TRACE_POINTS
#include <trace/events/mdio.h>
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index c924700cf37b..887076292e50 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -1,12 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Framework for MDIO devices, other than PHYs.
*
* Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 3ddaf9595697..a238388eb1a5 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -1,20 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Amlogic Meson GXL Internal PHY Driver
*
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
* Copyright (C) 2016 BayLibre, SAS. All rights reserved.
* Author: Neil Armstrong <narmstrong@baylibre.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index b1f959935f50..38d5bd877346 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/micrel.c
*
@@ -8,11 +9,6 @@
* Copyright (c) 2010-2013 Micrel, Inc.
* Copyright (c) 2014 Johan Hovold <johan@kernel.org>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
* Support : Micrel Phys:
* Giga phys: ksz9021, ksz9031, ksz9131
* 100/10 Phys : ksz8001, ksz8721, ksz8737, ksz8041
@@ -426,9 +422,6 @@ static int ksz9021_config_init(struct phy_device *phydev)
return 0;
}
-#define MII_KSZ9031RN_MMD_CTRL_REG 0x0d
-#define MII_KSZ9031RN_MMD_REGDATA_REG 0x0e
-#define OP_DATA 1
#define KSZ9031_PS_TO_REG 60
/* Extended registers */
@@ -446,24 +439,6 @@ static int ksz9021_config_init(struct phy_device *phydev)
#define MII_KSZ9031RN_EDPD 0x23
#define MII_KSZ9031RN_EDPD_ENABLE BIT(0)
-static int ksz9031_extended_write(struct phy_device *phydev,
- u8 mode, u32 dev_addr, u32 regnum, u16 val)
-{
- phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr);
- phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum);
- phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr);
- return phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, val);
-}
-
-static int ksz9031_extended_read(struct phy_device *phydev,
- u8 mode, u32 dev_addr, u32 regnum)
-{
- phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr);
- phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum);
- phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr);
- return phy_read(phydev, MII_KSZ9031RN_MMD_REGDATA_REG);
-}
-
static int ksz9031_of_load_skew_values(struct phy_device *phydev,
const struct device_node *of_node,
u16 reg, size_t field_sz,
@@ -484,7 +459,7 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev,
return 0;
if (matches < numfields)
- newval = ksz9031_extended_read(phydev, OP_DATA, 2, reg);
+ newval = phy_read_mmd(phydev, 2, reg);
else
newval = 0;
@@ -498,7 +473,7 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev,
<< (field_sz * i));
}
- return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
+ return phy_write_mmd(phydev, 2, reg, newval);
}
/* Center KSZ9031RNX FLP timing at 16ms. */
@@ -506,13 +481,13 @@ static int ksz9031_center_flp_timing(struct phy_device *phydev)
{
int result;
- result = ksz9031_extended_write(phydev, OP_DATA, 0,
- MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006);
+ result = phy_write_mmd(phydev, 0, MII_KSZ9031RN_FLP_BURST_TX_HI,
+ 0x0006);
if (result)
return result;
- result = ksz9031_extended_write(phydev, OP_DATA, 0,
- MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80);
+ result = phy_write_mmd(phydev, 0, MII_KSZ9031RN_FLP_BURST_TX_LO,
+ 0x1A80);
if (result)
return result;
@@ -524,11 +499,11 @@ static int ksz9031_enable_edpd(struct phy_device *phydev)
{
int reg;
- reg = ksz9031_extended_read(phydev, OP_DATA, 0x1C, MII_KSZ9031RN_EDPD);
+ reg = phy_read_mmd(phydev, 0x1C, MII_KSZ9031RN_EDPD);
if (reg < 0)
return reg;
- return ksz9031_extended_write(phydev, OP_DATA, 0x1C, MII_KSZ9031RN_EDPD,
- reg | MII_KSZ9031RN_EDPD_ENABLE);
+ return phy_write_mmd(phydev, 0x1C, MII_KSZ9031RN_EDPD,
+ reg | MII_KSZ9031RN_EDPD_ENABLE);
}
static int ksz9031_config_init(struct phy_device *phydev)
@@ -654,7 +629,7 @@ static int ksz9131_of_load_skew_values(struct phy_device *phydev,
return 0;
if (matches < numfields)
- newval = ksz9031_extended_read(phydev, OP_DATA, 2, reg);
+ newval = phy_read_mmd(phydev, 2, reg);
else
newval = 0;
@@ -668,7 +643,7 @@ static int ksz9131_of_load_skew_values(struct phy_device *phydev,
<< (field_sz * i));
}
- return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
+ return phy_write_mmd(phydev, 2, reg, newval);
}
static int ksz9131_config_init(struct phy_device *phydev)
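
With the driver-private ksz9031_extended_{read,write}() helpers gone,
the skew loaders above reduce to a plain read-modify-write on MMD
device address 2. A condensed sketch of that shape (the field width
and helper name are illustrative):

	/* Illustrative read-modify-write of one 4-bit skew field in a
	 * KSZ9031-style MMD register on device address 2, the pattern the
	 * conversion above preserves via phy_read_mmd()/phy_write_mmd().
	 */
	static int sketch_set_skew_field(struct phy_device *phydev, u16 reg,
					 int field, u16 value)
	{
		int val = phy_read_mmd(phydev, 2, reg);

		if (val < 0)
			return val;
		val &= ~(0xf << (4 * field));		/* clear the old nibble */
		val |= (value & 0xf) << (4 * field);	/* insert the new one */
		return phy_write_mmd(phydev, 2, reg, val);
	}
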
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 7557bebd5d7f..c6cbb3aa8ae0 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Microchip Technology
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 3949fe299b18..db50efb30df5 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* Driver for Microsemi VSC85xx PHYs
*
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 139bed2c8ab4..42282a86b680 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/national.c
*
@@ -7,12 +8,6 @@
* Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*
* Copyright (c) 2008 STMicroelectronics Limited
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 20fbd5eb56fd..909b3344babf 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Core PHY library, taken from phy.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/export.h>
#include <linux/phy.h>
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 189cd2048c3a..d12aa512b7f5 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Framework for configuring and reading PHY devices
* Based on code in sungem_phy.c and gianfar_phy.c
*
@@ -5,16 +6,8 @@
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
* Copyright (c) 2006, 2007 Maciej W. Rozycki
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
@@ -36,8 +29,6 @@
#include <linux/uaccess.h>
#include <linux/atomic.h>
-#include <asm/irq.h>
-
#define PHY_STATE_STR(_state) \
case PHY_##_state: \
return __stringify(_state); \
@@ -51,7 +42,6 @@ static const char *phy_state_to_str(enum phy_state st)
PHY_STATE_STR(RUNNING)
PHY_STATE_STR(NOLINK)
PHY_STATE_STR(FORCING)
- PHY_STATE_STR(CHANGELINK)
PHY_STATE_STR(HALTED)
PHY_STATE_STR(RESUMING)
}
@@ -795,46 +785,27 @@ static int phy_enable_interrupts(struct phy_device *phydev)
}
/**
- * phy_start_interrupts - request and enable interrupts for a PHY device
+ * phy_request_interrupt - request interrupt for a PHY device
* @phydev: target phy_device struct
*
* Description: Request the interrupt for the given PHY.
* If this fails, then we set irq to PHY_POLL.
- * Otherwise, we enable the interrupts in the PHY.
* This should only be called with a valid IRQ number.
- * Returns 0 on success or < 0 on error.
*/
-int phy_start_interrupts(struct phy_device *phydev)
+void phy_request_interrupt(struct phy_device *phydev)
{
- if (request_threaded_irq(phydev->irq, NULL, phy_interrupt,
- IRQF_ONESHOT | IRQF_SHARED,
- phydev_name(phydev), phydev) < 0) {
- pr_warn("%s: Can't get IRQ %d (PHY)\n",
- phydev->mdio.bus->name, phydev->irq);
+ int err;
+
+ err = request_threaded_irq(phydev->irq, NULL, phy_interrupt,
+ IRQF_ONESHOT | IRQF_SHARED,
+ phydev_name(phydev), phydev);
+ if (err) {
+ phydev_warn(phydev, "Error %d requesting IRQ %d, falling back to polling\n",
+ err, phydev->irq);
phydev->irq = PHY_POLL;
- return 0;
}
-
- return phy_enable_interrupts(phydev);
-}
-EXPORT_SYMBOL(phy_start_interrupts);
-
-/**
- * phy_stop_interrupts - disable interrupts from a PHY device
- * @phydev: target phy_device struct
- */
-int phy_stop_interrupts(struct phy_device *phydev)
-{
- int err = phy_disable_interrupts(phydev);
-
- if (err)
- phy_error(phydev);
-
- free_irq(phydev->irq, phydev);
-
- return err;
}
-EXPORT_SYMBOL(phy_stop_interrupts);
+EXPORT_SYMBOL(phy_request_interrupt);
/**
* phy_stop - Bring down the PHY link, and stop checking the status
@@ -859,6 +830,7 @@ void phy_stop(struct phy_device *phydev)
mutex_unlock(&phydev->lock);
phy_state_machine(&phydev->state_queue.work);
+ phy_stop_machine(phydev);
/* Cannot call flush_scheduled_work() here as desired because
* of rtnl_lock(), but PHY_HALTED shall guarantee irq handler
@@ -879,33 +851,34 @@ EXPORT_SYMBOL(phy_stop);
*/
void phy_start(struct phy_device *phydev)
{
- int err = 0;
+ int err;
mutex_lock(&phydev->lock);
- switch (phydev->state) {
- case PHY_READY:
- phydev->state = PHY_UP;
- break;
- case PHY_HALTED:
- /* if phy was suspended, bring the physical link up again */
- __phy_resume(phydev);
+ if (phydev->state != PHY_READY && phydev->state != PHY_HALTED) {
+ WARN(1, "called from state %s\n",
+ phy_state_to_str(phydev->state));
+ goto out;
+ }
- /* make sure interrupts are re-enabled for the PHY */
- if (phy_interrupt_is_valid(phydev)) {
- err = phy_enable_interrupts(phydev);
- if (err < 0)
- break;
- }
+ /* if phy was suspended, bring the physical link up again */
+ __phy_resume(phydev);
- phydev->state = PHY_RESUMING;
- break;
- default:
- break;
+ /* make sure interrupts are enabled for the PHY */
+ if (phy_interrupt_is_valid(phydev)) {
+ err = phy_enable_interrupts(phydev);
+ if (err < 0)
+ goto out;
}
- mutex_unlock(&phydev->lock);
- phy_trigger_machine(phydev);
+ if (phydev->state == PHY_READY)
+ phydev->state = PHY_UP;
+ else
+ phydev->state = PHY_RESUMING;
+
+ phy_start_machine(phydev);
+out:
+ mutex_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_start);
@@ -939,7 +912,6 @@ void phy_state_machine(struct work_struct *work)
break;
case PHY_NOLINK:
case PHY_RUNNING:
- case PHY_CHANGELINK:
case PHY_RESUMING:
err = phy_check_link_status(phydev);
break;
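
After this rework a MAC driver no longer pairs phy_start_interrupts()
and phy_stop_interrupts() around the state machine: the IRQ is
requested once at connect time, with automatic fallback to polling,
and phy_start()/phy_stop() handle the rest. A hedged sketch of the
resulting driver-side pattern (the bus id and adjust-link handler are
placeholders):

	/* Sketch only: the driver-side calling convention after the
	 * phy_request_interrupt() conversion.
	 */
	static void sketch_adjust_link(struct net_device *ndev)
	{
		/* react to phydev->link/speed/duplex changes here */
	}

	static int sketch_ndo_open(struct net_device *ndev)
	{
		struct phy_device *phydev;

		phydev = phy_connect(ndev, "sketch-mdio-bus:00",
				     sketch_adjust_link,
				     PHY_INTERFACE_MODE_RGMII);
		if (IS_ERR(phydev))
			return PTR_ERR(phydev);

		/* the IRQ was requested (or polling chosen) at connect
		 * time; per the WARN above, phy_start() must be called
		 * from PHY_READY or PHY_HALTED only
		 */
		phy_start(phydev);
		return 0;
	}
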
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 46c86725a693..891e0178b97f 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Framework for finding and configuring PHYs.
* Also contains generic PHY driver
*
* Author: Andy Fleming
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -37,8 +32,6 @@
#include <linux/uaccess.h>
#include <linux/of.h>
-#include <asm/irq.h>
-
MODULE_DESCRIPTION("PHY library");
MODULE_AUTHOR("Andy Fleming");
MODULE_LICENSE("GPL");
@@ -560,12 +553,33 @@ static const struct device_type mdio_bus_phy_type = {
.pm = MDIO_BUS_PHY_PM_OPS,
};
+static int phy_request_driver_module(struct phy_device *dev, int phy_id)
+{
+ int ret;
+
+ ret = request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
+ MDIO_ID_ARGS(phy_id));
+ /* We only check for failures in executing the usermode binary,
+ * not whether a PHY driver module exists for the PHY ID.
+ * Accept -ENOENT because this may occur when no initramfs exists
+ * and modprobe therefore isn't available.
+ */
+ if (IS_ENABLED(CONFIG_MODULES) && ret < 0 && ret != -ENOENT) {
+ phydev_err(dev, "error %d loading PHY driver module for ID 0x%08x\n",
+ ret, phy_id);
+ return ret;
+ }
+
+ return 0;
+}
+
struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
bool is_c45,
struct phy_c45_device_ids *c45_ids)
{
struct phy_device *dev;
struct mdio_device *mdiodev;
+ int ret = 0;
/* We allocate the device, and initialize the default values */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -622,15 +636,21 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
if (!(c45_ids->devices_in_package & (1 << i)))
continue;
- request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
- MDIO_ID_ARGS(c45_ids->device_ids[i]));
+ ret = phy_request_driver_module(dev,
+ c45_ids->device_ids[i]);
+ if (ret)
+ break;
}
} else {
- request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
- MDIO_ID_ARGS(phy_id));
+ ret = phy_request_driver_module(dev, phy_id);
}
- device_initialize(&mdiodev->dev);
+ if (!ret) {
+ device_initialize(&mdiodev->dev);
+ } else {
+ kfree(dev);
+ dev = ERR_PTR(ret);
+ }
return dev;
}
@@ -762,15 +782,8 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
/* Grab the bits from PHYIR1, and put them in the upper half */
phy_reg = mdiobus_read(bus, addr, MII_PHYSID1);
if (phy_reg < 0) {
- /* if there is no device, return without an error so scanning
- * the bus works properly
- */
- if (phy_reg == -EIO || phy_reg == -ENODEV) {
- *phy_id = 0xffffffff;
- return 0;
- }
-
- return -EIO;
+ /* returning -ENODEV doesn't stop bus scanning */
+ return (phy_reg == -EIO || phy_reg == -ENODEV) ? -ENODEV : -EIO;
}
*phy_id = (phy_reg & 0xffff) << 16;
@@ -831,13 +844,13 @@ int phy_device_register(struct phy_device *phydev)
/* Run all of the fixups for this PHY */
err = phy_scan_fixups(phydev);
if (err) {
- pr_err("PHY %d failed to initialize\n", phydev->mdio.addr);
+ phydev_err(phydev, "failed to initialize\n");
goto out;
}
err = device_add(&phydev->mdio.dev);
if (err) {
- pr_err("PHY %d failed to add\n", phydev->mdio.addr);
+ phydev_err(phydev, "failed to add\n");
goto out;
}
@@ -938,9 +951,8 @@ int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
return rc;
phy_prepare_link(phydev, handler);
- phy_start_machine(phydev);
- if (phydev->irq > 0)
- phy_start_interrupts(phydev);
+ if (phy_interrupt_is_valid(phydev))
+ phy_request_interrupt(phydev);
return 0;
}
@@ -995,10 +1007,11 @@ EXPORT_SYMBOL(phy_connect);
*/
void phy_disconnect(struct phy_device *phydev)
{
- if (phydev->irq > 0)
- phy_stop_interrupts(phydev);
+ if (phy_is_started(phydev))
+ phy_stop(phydev);
- phy_stop_machine(phydev);
+ if (phy_interrupt_is_valid(phydev))
+ free_irq(phydev->irq, phydev);
phydev->adjust_link = NULL;
@@ -1053,7 +1066,7 @@ int phy_init_hw(struct phy_device *phydev)
/* Deassert the reset signal */
phy_device_reset(phydev, 0);
- if (!phydev->drv || !phydev->drv->config_init)
+ if (!phydev->drv)
return 0;
if (phydev->drv->soft_reset)
@@ -1066,7 +1079,10 @@ int phy_init_hw(struct phy_device *phydev)
if (ret < 0)
return ret;
- return phydev->drv->config_init(phydev);
+ if (phydev->drv->config_init)
+ ret = phydev->drv->config_init(phydev);
+
+ return ret;
}
EXPORT_SYMBOL(phy_init_hw);
@@ -1291,6 +1307,36 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
}
EXPORT_SYMBOL(phy_attach);
+static bool phy_driver_is_genphy_kind(struct phy_device *phydev,
+ struct device_driver *driver)
+{
+ struct device *d = &phydev->mdio.dev;
+ bool ret = false;
+
+ if (!phydev->drv)
+ return ret;
+
+ get_device(d);
+ ret = d->driver == driver;
+ put_device(d);
+
+ return ret;
+}
+
+bool phy_driver_is_genphy(struct phy_device *phydev)
+{
+ return phy_driver_is_genphy_kind(phydev,
+ &genphy_driver.mdiodrv.driver);
+}
+EXPORT_SYMBOL_GPL(phy_driver_is_genphy);
+
+bool phy_driver_is_genphy_10g(struct phy_device *phydev)
+{
+ return phy_driver_is_genphy_kind(phydev,
+ &genphy_10g_driver.mdiodrv.driver);
+}
+EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g);
+
/**
* phy_detach - detach a PHY device from its network device
* @phydev: target phy_device struct
@@ -1322,8 +1368,8 @@ void phy_detach(struct phy_device *phydev)
* from the generic driver so that there's a chance a
* real driver could be loaded
*/
- if (phydev->mdio.dev.driver == &genphy_10g_driver.mdiodrv.driver ||
- phydev->mdio.dev.driver == &genphy_driver.mdiodrv.driver)
+ if (phy_driver_is_genphy(phydev) ||
+ phy_driver_is_genphy_10g(phydev))
device_release_driver(&phydev->mdio.dev);
/*
@@ -2267,14 +2313,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
new_driver->mdiodrv.driver.remove = phy_remove;
new_driver->mdiodrv.driver.owner = owner;
- /* The following works around an issue where the PHY driver doesn't bind
- * to the device, resulting in the genphy driver being used instead of
- * the dedicated driver. The root cause of the issue isn't known yet
- * and seems to be in the base driver core. Once this is fixed we may
- * remove this workaround.
- */
- new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
-
retval = driver_register(&new_driver->mdiodrv.driver);
if (retval) {
pr_err("%s: Error %d in registering driver\n",
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index 263385b75bba..b86a4b2116f8 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -1,15 +1,5 @@
-/* Copyright (C) 2016 National Instruments Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2016 National Instruments Corp. */
#include <linux/leds.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index e7becc7379d7..2e21ce42e388 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* phylink models the MAC to optional PHY connection, supporting
* technologies such as SFP cages where the PHY is hot-pluggable.
*
* Copyright (C) 2015 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/ethtool.h>
#include <linux/export.h>
@@ -679,9 +676,8 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
__ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported,
__ETHTOOL_LINK_MODE_MASK_NBITS, phy->advertising);
- phy_start_machine(phy);
- if (phy->irq > 0)
- phy_start_interrupts(phy);
+ if (phy_interrupt_is_valid(phy))
+ phy_request_interrupt(phy);
return 0;
}
@@ -1697,4 +1693,4 @@ void phylink_helper_basex_speed(struct phylink_link_state *state)
}
EXPORT_SYMBOL_GPL(phylink_helper_basex_speed);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index cfe2313dbefd..5486f6fb2ab2 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/qsemi.c
*
@@ -6,12 +7,6 @@
* Author: Andy Fleming
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index c6010fb1aa0f..75543f9370f3 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/realtek.c
*
@@ -6,12 +7,6 @@
* Author: Johnson Leung <r58129@freescale.com>
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/bitops.h>
#include <linux/phy.h>
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index fd8bb998ae52..298ab7546929 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index f9477ff55545..c94d3bfbc772 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/smsc.c
*
@@ -7,11 +8,6 @@
*
* Copyright (c) 2006 Herbert Valerio Riedel <hvr@gnu.org>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
* Support added for SMSC LAN8187 and LAN8700 by steve.glendinning@shawell.net
*
*/
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index f17b3441779b..92b64e254b44 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SPI driver for Micrel/Kendin KS8995M and KSZ8864RMN ethernet switches
*
@@ -5,10 +6,6 @@
*
* This file was based on: drivers/spi/at25.c
* Copyright (C) 2006 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 33d733684f5b..5b6acf431f98 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/ste10Xp.c
*
@@ -6,12 +7,6 @@
* Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*
* Copyright (c) 2008 STMicroelectronics Limited
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/module.h>
diff --git a/drivers/net/phy/swphy.c b/drivers/net/phy/swphy.c
index 34f58f2349e9..b9743569e431 100644
--- a/drivers/net/phy/swphy.c
+++ b/drivers/net/phy/swphy.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Software PHY emulation
*
@@ -7,11 +8,6 @@
* Anton Vorontsov <avorontsov@ru.mvista.com>
*
* Copyright (c) 2006-2007 MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/export.h>
#include <linux/mii.h>
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
index 91247182bc52..145c328b00fa 100644
--- a/drivers/net/phy/teranetics.c
+++ b/drivers/net/phy/teranetics.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Teranetics PHY
*
* Author: Shaohui Xie <Shaohui.Xie@freescale.com>
*
* Copyright 2015 Freescale Semiconductor, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
index 1e4fc42e4629..219fc7cdc2b3 100644
--- a/drivers/net/phy/uPD60620.c
+++ b/drivers/net/phy/uPD60620.c
@@ -1,13 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for the Renesas PHY uPD60620.
*
* Copyright (C) 2015 Softing Industrial Automation GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 0646af458f6a..dc0dd87a6694 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -1,15 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Vitesse PHYs
*
* Author: Kriston Carson
- *
- * Copyright (c) 2005, 2009, 2011 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 74a8782313cf..ebf419dc7307 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Xilinx GMII2RGMII Converter driver
*
* Copyright (C) 2016 Xilinx, Inc.
@@ -8,16 +9,6 @@
*
* Description:
* This driver is developed for Xilinx GMII2RGMII Converter
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index 7820fced33f6..941cfa8f1c2a 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -535,17 +535,20 @@ sb1000_activate(const int ioaddr[], const char* name)
int status;
ssleep(1);
- if ((status = card_send_command(ioaddr, name, Command0, st)))
+ status = card_send_command(ioaddr, name, Command0, st);
+ if (status)
return status;
- if ((status = card_send_command(ioaddr, name, Command1, st)))
+ status = card_send_command(ioaddr, name, Command1, st);
+ if (status)
return status;
if (st[3] != 0xf1) {
- if ((status = sb1000_start_get_set_command(ioaddr, name)))
+ status = sb1000_start_get_set_command(ioaddr, name);
+ if (status)
return status;
return -EIO;
}
udelay(1000);
- return sb1000_start_get_set_command(ioaddr, name);
+ return sb1000_start_get_set_command(ioaddr, name);
}
/* get SB1000 firmware version */
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 80373a9171dd..3f145e4c6c08 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -388,7 +388,6 @@ static void read_bulk_callback(struct urb *urb)
unsigned pkt_len, res;
struct sk_buff *skb;
struct net_device *netdev;
- u16 rx_stat;
int status = urb->status;
int result;
unsigned long flags;
@@ -424,7 +423,6 @@ static void read_bulk_callback(struct urb *urb)
goto goon;
res = urb->actual_length;
- rx_stat = le16_to_cpu(*(__le16 *)(urb->transfer_buffer + res - 4));
pkt_len = res - 4;
skb_put(dev->rx_skb, pkt_len);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8fadd8eaf601..2a0edd4653e3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1035,6 +1035,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
goto frame_err;
}
+ skb_record_rx_queue(skb, vq2rxq(rq->vq));
skb->protocol = eth_type_trans(skb, dev);
pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
ntohs(skb->protocol), skb->len, skb->pkt_type);
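skb_record_rx_queue() stores the receive queue index in the skb so that later transmit-queue selection and flow steering can see which hardware queue a flow arrived on; virtio_net was missing this in one of its receive paths. A hedged sketch of the same idiom in a generic multiqueue NAPI driver (struct my_rx_queue and my_build_skb() are hypothetical):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_rx_queue {
	struct napi_struct napi;
	struct net_device *netdev;
	u16 index;			/* hardware queue number */
};

/* hypothetical: builds an skb around a received buffer */
static struct sk_buff *my_build_skb(struct my_rx_queue *rxq,
				    void *buf, unsigned int len);

static void my_receive(struct my_rx_queue *rxq, void *buf, unsigned int len)
{
	struct sk_buff *skb = my_build_skb(rxq, buf, len);

	if (!skb)
		return;

	/* remember the ingress queue for RFS/queue mapping */
	skb_record_rx_queue(skb, rxq->index);
	skb->protocol = eth_type_trans(skb, rxq->netdev);
	napi_gro_receive(&rxq->napi, skb);
}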
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 5209ee9aac47..ef45c3c925be 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -361,10 +361,11 @@ errout:
static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan,
const struct vxlan_fdb *fdb,
const struct vxlan_rdst *rd,
+ struct netlink_ext_ack *extack,
struct switchdev_notifier_vxlan_fdb_info *fdb_info)
{
fdb_info->info.dev = vxlan->dev;
- fdb_info->info.extack = NULL;
+ fdb_info->info.extack = extack;
fdb_info->remote_ip = rd->remote_ip;
fdb_info->remote_port = rd->remote_port;
fdb_info->remote_vni = rd->remote_vni;
@@ -375,41 +376,50 @@ static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan,
fdb_info->added_by_user = fdb->flags & NTF_VXLAN_ADDED_BY_USER;
}
-static void vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan,
- struct vxlan_fdb *fdb,
- struct vxlan_rdst *rd,
- bool adding)
+static int vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan,
+ struct vxlan_fdb *fdb,
+ struct vxlan_rdst *rd,
+ bool adding,
+ struct netlink_ext_ack *extack)
{
struct switchdev_notifier_vxlan_fdb_info info;
enum switchdev_notifier_type notifier_type;
+ int ret;
if (WARN_ON(!rd))
- return;
+ return 0;
notifier_type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE
: SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE;
- vxlan_fdb_switchdev_notifier_info(vxlan, fdb, rd, &info);
- call_switchdev_notifiers(notifier_type, vxlan->dev,
- &info.info);
+ vxlan_fdb_switchdev_notifier_info(vxlan, fdb, rd, NULL, &info);
+ ret = call_switchdev_notifiers(notifier_type, vxlan->dev,
+ &info.info, extack);
+ return notifier_to_errno(ret);
}
-static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
- struct vxlan_rdst *rd, int type, bool swdev_notify)
+static int vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
+ struct vxlan_rdst *rd, int type, bool swdev_notify,
+ struct netlink_ext_ack *extack)
{
+ int err;
+
if (swdev_notify) {
switch (type) {
case RTM_NEWNEIGH:
- vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd,
- true);
+ err = vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd,
+ true, extack);
+ if (err)
+ return err;
break;
case RTM_DELNEIGH:
vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd,
- false);
+ false, extack);
break;
}
}
__vxlan_fdb_notify(vxlan, fdb, rd, type);
+ return 0;
}
static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
@@ -423,7 +433,7 @@ static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
.remote_vni = cpu_to_be32(VXLAN_N_VID),
};
- vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true);
+ vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL);
}
static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
@@ -435,7 +445,7 @@ static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
memcpy(f.eth_addr, eth_addr, ETH_ALEN);
- vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true);
+ vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL);
}
/* Hash Ethernet address */
@@ -545,7 +555,7 @@ int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
}
rdst = first_remote_rcu(f);
- vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, fdb_info);
+ vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, NULL, fdb_info);
out:
rcu_read_unlock();
@@ -556,19 +566,21 @@ EXPORT_SYMBOL_GPL(vxlan_fdb_find_uc);
static int vxlan_fdb_notify_one(struct notifier_block *nb,
const struct vxlan_dev *vxlan,
const struct vxlan_fdb *f,
- const struct vxlan_rdst *rdst)
+ const struct vxlan_rdst *rdst,
+ struct netlink_ext_ack *extack)
{
struct switchdev_notifier_vxlan_fdb_info fdb_info;
int rc;
- vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, &fdb_info);
+ vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, extack, &fdb_info);
rc = nb->notifier_call(nb, SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE,
&fdb_info);
return notifier_to_errno(rc);
}
int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
- struct notifier_block *nb)
+ struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan;
struct vxlan_rdst *rdst;
@@ -586,7 +598,8 @@ int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
if (f->vni == vni) {
list_for_each_entry(rdst, &f->remotes, list) {
rc = vxlan_fdb_notify_one(nb, vxlan,
- f, rdst);
+ f, rdst,
+ extack);
if (rc)
goto out;
}
@@ -625,7 +638,7 @@ EXPORT_SYMBOL_GPL(vxlan_fdb_clear_offload);
/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
union vxlan_addr *ip, __be16 port, __be32 vni,
- __u32 ifindex)
+ __u32 ifindex, struct vxlan_rdst *oldrd)
{
struct vxlan_rdst *rd;
@@ -637,6 +650,7 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f,
if (!rd)
return 0;
+ *oldrd = *rd;
dst_cache_reset(&rd->dst_cache);
rd->remote_ip = *ip;
rd->remote_port = port;
@@ -826,120 +840,177 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
return 0;
}
-/* Add new entry to forwarding table -- assumes lock held */
-static int vxlan_fdb_update(struct vxlan_dev *vxlan,
- const u8 *mac, union vxlan_addr *ip,
- __u16 state, __u16 flags,
- __be16 port, __be32 src_vni, __be32 vni,
- __u32 ifindex, __u16 ndm_flags,
- bool swdev_notify)
+static void vxlan_fdb_free(struct rcu_head *head)
+{
+ struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
+ struct vxlan_rdst *rd, *nd;
+
+ list_for_each_entry_safe(rd, nd, &f->remotes, list) {
+ dst_cache_destroy(&rd->dst_cache);
+ kfree(rd);
+ }
+ kfree(f);
+}
+
+static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
+ bool do_notify, bool swdev_notify)
+{
+ struct vxlan_rdst *rd;
+
+ netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr);
+
+ --vxlan->addrcnt;
+ if (do_notify)
+ list_for_each_entry(rd, &f->remotes, list)
+ vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH,
+ swdev_notify, NULL);
+
+ hlist_del_rcu(&f->hlist);
+ call_rcu(&f->rcu, vxlan_fdb_free);
+}
+
+static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
+ union vxlan_addr *ip,
+ __u16 state, __u16 flags,
+ __be16 port, __be32 vni,
+ __u32 ifindex, __u16 ndm_flags,
+ struct vxlan_fdb *f,
+ bool swdev_notify,
+ struct netlink_ext_ack *extack)
{
__u16 fdb_flags = (ndm_flags & ~NTF_USE);
struct vxlan_rdst *rd = NULL;
- struct vxlan_fdb *f;
+ struct vxlan_rdst oldrd;
int notify = 0;
- int rc;
-
- f = __vxlan_find_mac(vxlan, mac, src_vni);
- if (f) {
- if (flags & NLM_F_EXCL) {
- netdev_dbg(vxlan->dev,
- "lost race to create %pM\n", mac);
- return -EEXIST;
- }
+ int rc = 0;
+ int err;
- /* Do not allow an externally learned entry to take over an
- * entry added by the user.
- */
- if (!(fdb_flags & NTF_EXT_LEARNED) ||
- !(f->flags & NTF_VXLAN_ADDED_BY_USER)) {
- if (f->state != state) {
- f->state = state;
- f->updated = jiffies;
- notify = 1;
- }
- if (f->flags != fdb_flags) {
- f->flags = fdb_flags;
- f->updated = jiffies;
- notify = 1;
- }
+ /* Do not allow an externally learned entry to take over an entry added
+ * by the user.
+ */
+ if (!(fdb_flags & NTF_EXT_LEARNED) ||
+ !(f->flags & NTF_VXLAN_ADDED_BY_USER)) {
+ if (f->state != state) {
+ f->state = state;
+ f->updated = jiffies;
+ notify = 1;
}
-
- if ((flags & NLM_F_REPLACE)) {
- /* Only change unicasts */
- if (!(is_multicast_ether_addr(f->eth_addr) ||
- is_zero_ether_addr(f->eth_addr))) {
- notify |= vxlan_fdb_replace(f, ip, port, vni,
- ifindex);
- } else
- return -EOPNOTSUPP;
+ if (f->flags != fdb_flags) {
+ f->flags = fdb_flags;
+ f->updated = jiffies;
+ notify = 1;
}
- if ((flags & NLM_F_APPEND) &&
- (is_multicast_ether_addr(f->eth_addr) ||
- is_zero_ether_addr(f->eth_addr))) {
- rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
+ }
- if (rc < 0)
- return rc;
+ if ((flags & NLM_F_REPLACE)) {
+ /* Only change unicasts */
+ if (!(is_multicast_ether_addr(f->eth_addr) ||
+ is_zero_ether_addr(f->eth_addr))) {
+ rc = vxlan_fdb_replace(f, ip, port, vni,
+ ifindex, &oldrd);
notify |= rc;
- }
-
- if (ndm_flags & NTF_USE)
- f->used = jiffies;
- } else {
- if (!(flags & NLM_F_CREATE))
- return -ENOENT;
-
- /* Disallow replace to add a multicast entry */
- if ((flags & NLM_F_REPLACE) &&
- (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
+ } else {
return -EOPNOTSUPP;
+ }
+ }
+ if ((flags & NLM_F_APPEND) &&
+ (is_multicast_ether_addr(f->eth_addr) ||
+ is_zero_ether_addr(f->eth_addr))) {
+ rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
- netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
- rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
- vni, ifindex, fdb_flags, &f);
if (rc < 0)
return rc;
- notify = 1;
+ notify |= rc;
}
+ if (ndm_flags & NTF_USE)
+ f->used = jiffies;
+
if (notify) {
if (rd == NULL)
rd = first_remote_rtnl(f);
- vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH, swdev_notify);
+
+ err = vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH,
+ swdev_notify, extack);
+ if (err)
+ goto err_notify;
}
return 0;
+
+err_notify:
+ if ((flags & NLM_F_REPLACE) && rc)
+ *rd = oldrd;
+ else if ((flags & NLM_F_APPEND) && rc)
+ list_del_rcu(&rd->list);
+ return err;
}
-static void vxlan_fdb_free(struct rcu_head *head)
+static int vxlan_fdb_update_create(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __u16 flags,
+ __be16 port, __be32 src_vni, __be32 vni,
+ __u32 ifindex, __u16 ndm_flags,
+ bool swdev_notify,
+ struct netlink_ext_ack *extack)
{
- struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
- struct vxlan_rdst *rd, *nd;
+ __u16 fdb_flags = (ndm_flags & ~NTF_USE);
+ struct vxlan_fdb *f;
+ int rc;
- list_for_each_entry_safe(rd, nd, &f->remotes, list) {
- dst_cache_destroy(&rd->dst_cache);
- kfree(rd);
- }
- kfree(f);
+ /* Disallow replace to add a multicast entry */
+ if ((flags & NLM_F_REPLACE) &&
+ (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
+ return -EOPNOTSUPP;
+
+ netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
+ rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
+ vni, ifindex, fdb_flags, &f);
+ if (rc < 0)
+ return rc;
+
+ rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH,
+ swdev_notify, extack);
+ if (rc)
+ goto err_notify;
+
+ return 0;
+
+err_notify:
+ vxlan_fdb_destroy(vxlan, f, false, false);
+ return rc;
}
-static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
- bool do_notify, bool swdev_notify)
+/* Add new entry to forwarding table -- assumes lock held */
+static int vxlan_fdb_update(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __u16 flags,
+ __be16 port, __be32 src_vni, __be32 vni,
+ __u32 ifindex, __u16 ndm_flags,
+ bool swdev_notify,
+ struct netlink_ext_ack *extack)
{
- struct vxlan_rdst *rd;
+ struct vxlan_fdb *f;
- netdev_dbg(vxlan->dev,
- "delete %pM\n", f->eth_addr);
+ f = __vxlan_find_mac(vxlan, mac, src_vni);
+ if (f) {
+ if (flags & NLM_F_EXCL) {
+ netdev_dbg(vxlan->dev,
+ "lost race to create %pM\n", mac);
+ return -EEXIST;
+ }
- --vxlan->addrcnt;
- if (do_notify)
- list_for_each_entry(rd, &f->remotes, list)
- vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH,
- swdev_notify);
+ return vxlan_fdb_update_existing(vxlan, ip, state, flags, port,
+ vni, ifindex, ndm_flags, f,
+ swdev_notify, extack);
+ } else {
+ if (!(flags & NLM_F_CREATE))
+ return -ENOENT;
- hlist_del_rcu(&f->hlist);
- call_rcu(&f->rcu, vxlan_fdb_free);
+ return vxlan_fdb_update_create(vxlan, mac, ip, state, flags,
+ port, src_vni, vni, ifindex,
+ ndm_flags, swdev_notify, extack);
+ }
}
static void vxlan_dst_free(struct rcu_head *head)
@@ -954,7 +1025,7 @@ static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
struct vxlan_rdst *rd, bool swdev_notify)
{
list_del_rcu(&rd->list);
- vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, swdev_notify);
+ vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, swdev_notify, NULL);
call_rcu(&rd->rcu, vxlan_dst_free);
}
@@ -1025,7 +1096,8 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr, u16 vid, u16 flags)
+ const unsigned char *addr, u16 vid, u16 flags,
+ struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
/* struct net *net = dev_net(vxlan->dev); */
@@ -1055,7 +1127,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
port, src_vni, vni, ifindex,
ndm->ndm_flags | NTF_VXLAN_ADDED_BY_USER,
- true);
+ true, extack);
spin_unlock_bh(&vxlan->hash_lock);
return err;
@@ -1223,7 +1295,7 @@ static bool vxlan_snoop(struct net_device *dev,
rdst->remote_ip = *src_ip;
f->updated = jiffies;
- vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true);
+ vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL);
} else {
/* learned new entry */
spin_lock(&vxlan->hash_lock);
@@ -1236,7 +1308,7 @@ static bool vxlan_snoop(struct net_device *dev,
vxlan->cfg.dst_port,
vni,
vxlan->default_dst.remote_vni,
- ifindex, NTF_SELF, true);
+ ifindex, NTF_SELF, true, NULL);
spin_unlock(&vxlan->hash_lock);
}
@@ -3478,9 +3550,12 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
goto errout;
/* notify default fdb entry */
- if (f)
- vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH,
- true);
+ if (f) {
+ err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
+ RTM_NEWNEIGH, true, extack);
+ if (err)
+ goto errout;
+ }
list_add(&vxlan->next, &vn->vxlan_list);
return 0;
@@ -3727,8 +3802,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
- unsigned long old_age_interval;
- struct vxlan_rdst old_dst;
+ struct net_device *lowerdev;
struct vxlan_config conf;
int err;
@@ -3737,46 +3811,43 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
if (err)
return err;
- old_age_interval = vxlan->cfg.age_interval;
- memcpy(&old_dst, dst, sizeof(struct vxlan_rdst));
-
- err = vxlan_dev_configure(vxlan->net, dev, &conf, true, extack);
+ err = vxlan_config_validate(vxlan->net, &conf, &lowerdev,
+ vxlan, extack);
if (err)
return err;
- if (old_age_interval != vxlan->cfg.age_interval)
- mod_timer(&vxlan->age_timer, jiffies);
-
/* handle default dst entry */
- if (!vxlan_addr_equal(&dst->remote_ip, &old_dst.remote_ip)) {
+ if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
spin_lock_bh(&vxlan->hash_lock);
- if (!vxlan_addr_any(&old_dst.remote_ip))
- __vxlan_fdb_delete(vxlan, all_zeros_mac,
- old_dst.remote_ip,
- vxlan->cfg.dst_port,
- old_dst.remote_vni,
- old_dst.remote_vni,
- old_dst.remote_ifindex,
- true);
-
- if (!vxlan_addr_any(&dst->remote_ip)) {
+ if (!vxlan_addr_any(&conf.remote_ip)) {
err = vxlan_fdb_update(vxlan, all_zeros_mac,
- &dst->remote_ip,
+ &conf.remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
NLM_F_APPEND | NLM_F_CREATE,
vxlan->cfg.dst_port,
- dst->remote_vni,
- dst->remote_vni,
- dst->remote_ifindex,
- NTF_SELF, true);
+ conf.vni, conf.vni,
+ conf.remote_ifindex,
+ NTF_SELF, true, extack);
if (err) {
spin_unlock_bh(&vxlan->hash_lock);
return err;
}
}
+ if (!vxlan_addr_any(&dst->remote_ip))
+ __vxlan_fdb_delete(vxlan, all_zeros_mac,
+ dst->remote_ip,
+ vxlan->cfg.dst_port,
+ dst->remote_vni,
+ dst->remote_vni,
+ dst->remote_ifindex,
+ true);
spin_unlock_bh(&vxlan->hash_lock);
}
+ if (conf.age_interval != vxlan->cfg.age_interval)
+ mod_timer(&vxlan->age_timer, jiffies);
+
+ vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
return 0;
}
@@ -4051,8 +4122,11 @@ vxlan_fdb_external_learn_add(struct net_device *dev,
struct switchdev_notifier_vxlan_fdb_info *fdb_info)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct netlink_ext_ack *extack;
int err;
+ extack = switchdev_notifier_info_to_extack(&fdb_info->info);
+
spin_lock_bh(&vxlan->hash_lock);
err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip,
NUD_REACHABLE,
@@ -4062,7 +4136,7 @@ vxlan_fdb_external_learn_add(struct net_device *dev,
fdb_info->remote_vni,
fdb_info->remote_ifindex,
NTF_USE | NTF_SELF | NTF_EXT_LEARNED,
- false);
+ false, extack);
spin_unlock_bh(&vxlan->hash_lock);
return err;
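The thread running through this vxlan diff is that FDB notifications can now fail: vxlan_fdb_notify() returns an error and carries a netlink_ext_ack, so a switchdev driver can veto an entry it cannot offload with a message that reaches the user, and vxlan rolls the entry back (see the err_notify labels above). A hedged sketch of the consumer side, assuming a hypothetical my_can_offload() predicate; the notifier plumbing itself is the API this series extends:

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/notifier.h>
#include <net/switchdev.h>
#include <net/vxlan.h>

/* hypothetical: can this FDB entry be programmed into hardware? */
static bool my_can_offload(const struct switchdev_notifier_vxlan_fdb_info *fdb);

static int my_switchdev_event(struct notifier_block *nb,
			      unsigned long event, void *ptr)
{
	struct switchdev_notifier_vxlan_fdb_info *fdb_info = ptr;
	struct netlink_ext_ack *extack;

	if (event != SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE)
		return NOTIFY_DONE;

	extack = switchdev_notifier_info_to_extack(&fdb_info->info);
	if (!my_can_offload(fdb_info)) {
		/* the message travels back to the netlink requester */
		NL_SET_ERR_MSG_MOD(extack, "FDB entry not offloadable");
		return notifier_from_errno(-EOPNOTSUPP);
	}

	return NOTIFY_OK;
}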
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index c0b0f525c87c..d5dc823f781e 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -456,16 +456,16 @@ static int state_check(u32 state, struct dscc4_dev_priv *dpriv,
int ret = 0;
if (debug > 1) {
- if (SOURCE_ID(state) != dpriv->dev_id) {
- printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n",
- dev->name, msg, SOURCE_ID(state), state );
+ if (SOURCE_ID(state) != dpriv->dev_id) {
+ printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n",
+ dev->name, msg, SOURCE_ID(state), state);
ret = -1;
- }
- if (state & 0x0df80c00) {
- printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n",
- dev->name, msg, state);
+ }
+ if (state & 0x0df80c00) {
+ printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n",
+ dev->name, msg, state);
ret = -1;
- }
+ }
}
return ret;
}
@@ -1760,25 +1760,25 @@ try:
} else { /* SccEvt */
if (debug > 1) {
//FIXME: verifier la presence de tous les evenements
//(FIXME translated: verify the presence of all events)
- static struct {
- u32 mask;
- const char *irq_name;
- } evts[] = {
- { 0x00008000, "TIN"},
- { 0x00000020, "RSC"},
- { 0x00000010, "PCE"},
- { 0x00000008, "PLLA"},
- { 0, NULL}
- }, *evt;
-
- for (evt = evts; evt->irq_name; evt++) {
- if (state & evt->mask) {
+ static struct {
+ u32 mask;
+ const char *irq_name;
+ } evts[] = {
+ { 0x00008000, "TIN"},
+ { 0x00000020, "RSC"},
+ { 0x00000010, "PCE"},
+ { 0x00000008, "PLLA"},
+ { 0, NULL}
+ }, *evt;
+
+ for (evt = evts; evt->irq_name; evt++) {
+ if (state & evt->mask) {
printk(KERN_DEBUG "%s: %s\n",
- dev->name, evt->irq_name);
- if (!(state &= ~evt->mask))
- goto try;
+ dev->name, evt->irq_name);
+ if (!(state &= ~evt->mask))
+ goto try;
+ }
}
- }
} else {
if (!(state &= ~0x0000c03c))
goto try;
diff --git a/drivers/net/wan/lmc/Makefile b/drivers/net/wan/lmc/Makefile
index 609710d64eb5..247f60c401ef 100644
--- a/drivers/net/wan/lmc/Makefile
+++ b/drivers/net/wan/lmc/Makefile
@@ -14,4 +14,4 @@ lmc-objs := lmc_debug.o lmc_media.o lmc_main.o lmc_proto.o
# -DDEBUG \
# -DLMC_PACKET_LOG
-ccflags-y := -I. $(DBGDEF)
+ccflags-y := $(DBGDEF)
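Dropping -I. here is kbuild hygiene: with a separate output directory (make O=...), the working directory is the object tree, so -I. never pointed at these sources anyway. Were a local include path still needed, a kbuild-safe spelling would be (hedged example):

ccflags-y := -I $(srctree)/$(src) $(DBGDEF)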
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index d137c480db46..aeb4a8b2e0af 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -53,7 +53,7 @@ config PTP_1588_CLOCK_QORIQ
packets using the SO_TIMESTAMPING API.
To compile this driver as a module, choose M here: the module
- will be called ptp_qoriq.
+ will be called ptp-qoriq.
config PTP_1588_CLOCK_IXP46X
tristate "Intel IXP46x as PTP clock"
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index 19efa9cfa950..677d1d178a3e 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -9,4 +9,6 @@ obj-$(CONFIG_PTP_1588_CLOCK_DTE) += ptp_dte.o
obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o
obj-$(CONFIG_PTP_1588_CLOCK_PCH) += ptp_pch.o
obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o
-obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp_qoriq.o
+obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o
+ptp-qoriq-y += ptp_qoriq.o
+ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index fdd49c26bbcc..43416b2e8a13 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -88,6 +88,49 @@ static void set_fipers(struct qoriq_ptp *qoriq_ptp)
qoriq_write(&regs->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2);
}
+static int extts_clean_up(struct qoriq_ptp *qoriq_ptp, int index,
+ bool update_event)
+{
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
+ struct ptp_clock_event event;
+ void __iomem *reg_etts_l;
+ void __iomem *reg_etts_h;
+ u32 valid, stat, lo, hi;
+
+ switch (index) {
+ case 0:
+ valid = ETS1_VLD;
+ reg_etts_l = &regs->etts_regs->tmr_etts1_l;
+ reg_etts_h = &regs->etts_regs->tmr_etts1_h;
+ break;
+ case 1:
+ valid = ETS2_VLD;
+ reg_etts_l = &regs->etts_regs->tmr_etts2_l;
+ reg_etts_h = &regs->etts_regs->tmr_etts2_h;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = index;
+
+ do {
+ lo = qoriq_read(reg_etts_l);
+ hi = qoriq_read(reg_etts_h);
+
+ if (update_event) {
+ event.timestamp = ((u64) hi) << 32;
+ event.timestamp |= lo;
+ ptp_clock_event(qoriq_ptp->clock, &event);
+ }
+
+ stat = qoriq_read(&regs->ctrl_regs->tmr_stat);
+ } while (qoriq_ptp->extts_fifo_support && (stat & valid));
+
+ return 0;
+}
+
/*
* Interrupt service routine
*/
@@ -98,33 +141,28 @@ static irqreturn_t isr(int irq, void *priv)
struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
struct ptp_clock_event event;
u64 ns;
- u32 ack = 0, lo, hi, mask, val;
+ u32 ack = 0, lo, hi, mask, val, irqs;
+
+ spin_lock(&qoriq_ptp->lock);
val = qoriq_read(&regs->ctrl_regs->tmr_tevent);
+ mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
+
+ spin_unlock(&qoriq_ptp->lock);
- if (val & ETS1) {
+ irqs = val & mask;
+
+ if (irqs & ETS1) {
ack |= ETS1;
- hi = qoriq_read(&regs->etts_regs->tmr_etts1_h);
- lo = qoriq_read(&regs->etts_regs->tmr_etts1_l);
- event.type = PTP_CLOCK_EXTTS;
- event.index = 0;
- event.timestamp = ((u64) hi) << 32;
- event.timestamp |= lo;
- ptp_clock_event(qoriq_ptp->clock, &event);
+ extts_clean_up(qoriq_ptp, 0, true);
}
- if (val & ETS2) {
+ if (irqs & ETS2) {
ack |= ETS2;
- hi = qoriq_read(&regs->etts_regs->tmr_etts2_h);
- lo = qoriq_read(&regs->etts_regs->tmr_etts2_l);
- event.type = PTP_CLOCK_EXTTS;
- event.index = 1;
- event.timestamp = ((u64) hi) << 32;
- event.timestamp |= lo;
- ptp_clock_event(qoriq_ptp->clock, &event);
+ extts_clean_up(qoriq_ptp, 1, true);
}
- if (val & ALM2) {
+ if (irqs & ALM2) {
ack |= ALM2;
if (qoriq_ptp->alarm_value) {
event.type = PTP_CLOCK_ALARM;
@@ -136,13 +174,10 @@ static irqreturn_t isr(int irq, void *priv)
ns = qoriq_ptp->alarm_value + qoriq_ptp->alarm_interval;
hi = ns >> 32;
lo = ns & 0xffffffff;
- spin_lock(&qoriq_ptp->lock);
qoriq_write(&regs->alarm_regs->tmr_alarm2_l, lo);
qoriq_write(&regs->alarm_regs->tmr_alarm2_h, hi);
- spin_unlock(&qoriq_ptp->lock);
qoriq_ptp->alarm_value = ns;
} else {
- qoriq_write(&regs->ctrl_regs->tmr_tevent, ALM2);
spin_lock(&qoriq_ptp->lock);
mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
mask &= ~ALM2EN;
@@ -153,7 +188,7 @@ static irqreturn_t isr(int irq, void *priv)
}
}
- if (val & PP1) {
+ if (irqs & PP1) {
ack |= PP1;
event.type = PTP_CLOCK_PPS;
ptp_clock_event(qoriq_ptp->clock, &event);
@@ -260,7 +295,7 @@ static int ptp_qoriq_enable(struct ptp_clock_info *ptp,
struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
unsigned long flags;
- u32 bit, mask;
+ u32 bit, mask = 0;
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
@@ -274,32 +309,32 @@ static int ptp_qoriq_enable(struct ptp_clock_info *ptp,
default:
return -EINVAL;
}
- spin_lock_irqsave(&qoriq_ptp->lock, flags);
- mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
- if (on)
- mask |= bit;
- else
- mask &= ~bit;
- qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
- spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
- return 0;
- case PTP_CLK_REQ_PPS:
- spin_lock_irqsave(&qoriq_ptp->lock, flags);
- mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
if (on)
- mask |= PP1EN;
- else
- mask &= ~PP1EN;
- qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
- spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
- return 0;
+ extts_clean_up(qoriq_ptp, rq->extts.index, false);
- default:
break;
+ case PTP_CLK_REQ_PPS:
+ bit = PP1EN;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock_irqsave(&qoriq_ptp->lock, flags);
+
+ mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
+ if (on) {
+ mask |= bit;
+ qoriq_write(&regs->ctrl_regs->tmr_tevent, bit);
+ } else {
+ mask &= ~bit;
}
- return -EOPNOTSUPP;
+ qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
+
+ spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
+ return 0;
}
static const struct ptp_clock_info ptp_qoriq_caps = {
@@ -436,11 +471,17 @@ static int qoriq_ptp_probe(struct platform_device *dev)
err = -EINVAL;
+ qoriq_ptp->dev = &dev->dev;
qoriq_ptp->caps = ptp_qoriq_caps;
if (of_property_read_u32(node, "fsl,cksel", &qoriq_ptp->cksel))
qoriq_ptp->cksel = DEFAULT_CKSEL;
+ if (of_property_read_bool(node, "fsl,extts-fifo"))
+ qoriq_ptp->extts_fifo_support = true;
+ else
+ qoriq_ptp->extts_fifo_support = false;
+
if (of_property_read_u32(node,
"fsl,tclk-period", &qoriq_ptp->tclk_period) ||
of_property_read_u32(node,
@@ -532,6 +573,7 @@ static int qoriq_ptp_probe(struct platform_device *dev)
}
qoriq_ptp->phc_index = ptp_clock_index(qoriq_ptp->clock);
+ ptp_qoriq_create_debugfs(qoriq_ptp);
platform_set_drvdata(dev, qoriq_ptp);
return 0;
@@ -557,6 +599,7 @@ static int qoriq_ptp_remove(struct platform_device *dev)
qoriq_write(&regs->ctrl_regs->tmr_temask, 0);
qoriq_write(&regs->ctrl_regs->tmr_ctrl, 0);
+ ptp_qoriq_remove_debugfs(qoriq_ptp);
ptp_clock_unregister(qoriq_ptp->clock);
iounmap(qoriq_ptp->base);
release_resource(qoriq_ptp->rsrc);
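Two points in this ISR rework are worth pulling out: pending events are now ANDed with the enabled mask before being handled, so a disabled event can neither trigger work nor get acked by accident, and FIFO-capable external trigger timestamps are drained until the valid bit clears. A hedged sketch of the masking idiom, with hypothetical device structure and register accessors:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#define MY_EVENT_A	BIT(0)

struct my_dev {
	spinlock_t lock;	/* protects event/mask registers */
};

/* hypothetical MMIO accessors */
static u32 my_read_event(struct my_dev *dev);
static u32 my_read_mask(struct my_dev *dev);
static void my_ack_event(struct my_dev *dev, u32 bits);

static irqreturn_t my_isr(int irq, void *priv)
{
	struct my_dev *dev = priv;
	u32 pending, enabled, irqs, ack = 0;

	spin_lock(&dev->lock);
	pending = my_read_event(dev);
	enabled = my_read_mask(dev);
	spin_unlock(&dev->lock);

	irqs = pending & enabled;	/* ignore disabled sources */

	if (irqs & MY_EVENT_A) {
		ack |= MY_EVENT_A;
		/* ... handle the event ... */
	}

	if (ack) {
		my_ack_event(dev, ack);	/* write-1-to-clear */
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}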
diff --git a/drivers/ptp/ptp_qoriq_debugfs.c b/drivers/ptp/ptp_qoriq_debugfs.c
new file mode 100644
index 000000000000..970595021088
--- /dev/null
+++ b/drivers/ptp/ptp_qoriq_debugfs.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright 2019 NXP
+ */
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/fsl/ptp_qoriq.h>
+
+static int ptp_qoriq_fiper1_lpbk_get(void *data, u64 *val)
+{
+ struct qoriq_ptp *qoriq_ptp = data;
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
+ u32 ctrl;
+
+ ctrl = qoriq_read(&regs->ctrl_regs->tmr_ctrl);
+ *val = ctrl & PP1L ? 1 : 0;
+
+ return 0;
+}
+
+static int ptp_qoriq_fiper1_lpbk_set(void *data, u64 val)
+{
+ struct qoriq_ptp *qoriq_ptp = data;
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
+ u32 ctrl;
+
+ ctrl = qoriq_read(&regs->ctrl_regs->tmr_ctrl);
+ if (val == 0)
+ ctrl &= ~PP1L;
+ else
+ ctrl |= PP1L;
+
+ qoriq_write(&regs->ctrl_regs->tmr_ctrl, ctrl);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(ptp_qoriq_fiper1_fops, ptp_qoriq_fiper1_lpbk_get,
+ ptp_qoriq_fiper1_lpbk_set, "%llu\n");
+
+static int ptp_qoriq_fiper2_lpbk_get(void *data, u64 *val)
+{
+ struct qoriq_ptp *qoriq_ptp = data;
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
+ u32 ctrl;
+
+ ctrl = qoriq_read(&regs->ctrl_regs->tmr_ctrl);
+ *val = ctrl & PP2L ? 1 : 0;
+
+ return 0;
+}
+
+static int ptp_qoriq_fiper2_lpbk_set(void *data, u64 val)
+{
+ struct qoriq_ptp *qoriq_ptp = data;
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
+ u32 ctrl;
+
+ ctrl = qoriq_read(&regs->ctrl_regs->tmr_ctrl);
+ if (val == 0)
+ ctrl &= ~PP2L;
+ else
+ ctrl |= PP2L;
+
+ qoriq_write(&regs->ctrl_regs->tmr_ctrl, ctrl);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(ptp_qoriq_fiper2_fops, ptp_qoriq_fiper2_lpbk_get,
+ ptp_qoriq_fiper2_lpbk_set, "%llu\n");
+
+void ptp_qoriq_create_debugfs(struct qoriq_ptp *qoriq_ptp)
+{
+ struct dentry *root;
+
+ root = debugfs_create_dir(dev_name(qoriq_ptp->dev), NULL);
+ if (IS_ERR(root))
+ return;
+ if (!root)
+ goto err_root;
+
+ qoriq_ptp->debugfs_root = root;
+
+ if (!debugfs_create_file_unsafe("fiper1-loopback", 0600, root,
+ qoriq_ptp, &ptp_qoriq_fiper1_fops))
+ goto err_node;
+ if (!debugfs_create_file_unsafe("fiper2-loopback", 0600, root,
+ qoriq_ptp, &ptp_qoriq_fiper2_fops))
+ goto err_node;
+ return;
+
+err_node:
+ debugfs_remove_recursive(root);
+ qoriq_ptp->debugfs_root = NULL;
+err_root:
+ dev_err(qoriq_ptp->dev, "failed to initialize debugfs\n");
+}
+
+void ptp_qoriq_remove_debugfs(struct qoriq_ptp *qoriq_ptp)
+{
+ debugfs_remove_recursive(qoriq_ptp->debugfs_root);
+ qoriq_ptp->debugfs_root = NULL;
+}
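Once the clock probes, the two knobs appear under debugfs in a directory named after the device, and toggling them loops the FIPER pulse back internally. Hedged usage, with a made-up device name:

# echo 1 > /sys/kernel/debug/2d10e00.ptp/fiper1-loopback
# cat /sys/kernel/debug/2d10e00.ptp/fiper1-loopback
1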
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 0ee026947f20..d65650ef6b41 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -741,11 +741,6 @@ struct qeth_discipline {
struct qeth_ipa_cmd *cmd);
};
-struct qeth_vlan_vid {
- struct list_head list;
- unsigned short vid;
-};
-
enum qeth_addr_disposition {
QETH_DISP_ADDR_DELETE = 0,
QETH_DISP_ADDR_DO_NOTHING = 1,
@@ -792,8 +787,6 @@ struct qeth_card {
wait_queue_head_t wait_q;
spinlock_t mclock;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- struct mutex vid_list_mutex; /* vid_list */
- struct list_head vid_list;
DECLARE_HASHTABLE(mac_htable, 4);
DECLARE_HASHTABLE(ip_htable, 4);
DECLARE_HASHTABLE(ip_mc_htable, 4);
@@ -802,7 +795,6 @@ struct qeth_card {
unsigned long thread_start_mask;
unsigned long thread_allowed_mask;
unsigned long thread_running_mask;
- struct task_struct *recovery_task;
spinlock_t ip_lock;
struct qeth_ipato ipato;
struct list_head cmd_waiter_list;
@@ -976,11 +968,8 @@ extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
struct net_device *qeth_clone_netdev(struct net_device *orig);
struct qeth_card *qeth_get_card_by_busid(char *bus_id);
-void qeth_set_recovery_task(struct qeth_card *);
-void qeth_clear_recovery_task(struct qeth_card *);
void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
int qeth_threads_running(struct qeth_card *, unsigned long);
-int qeth_wait_for_threads(struct qeth_card *, unsigned long);
int qeth_do_run_thread(struct qeth_card *, unsigned long);
void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
@@ -1047,6 +1036,9 @@ netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
+int qeth_open(struct net_device *dev);
+int qeth_stop(struct net_device *dev);
+
int qeth_vm_request_mac(struct qeth_card *card);
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index e63e03143ca7..dcc06e48b70b 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -193,23 +193,6 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
return "n/a";
}
-void qeth_set_recovery_task(struct qeth_card *card)
-{
- card->recovery_task = current;
-}
-EXPORT_SYMBOL_GPL(qeth_set_recovery_task);
-
-void qeth_clear_recovery_task(struct qeth_card *card)
-{
- card->recovery_task = NULL;
-}
-EXPORT_SYMBOL_GPL(qeth_clear_recovery_task);
-
-static bool qeth_is_recovery_task(const struct qeth_card *card)
-{
- return card->recovery_task == current;
-}
-
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask)
{
@@ -236,15 +219,6 @@ int qeth_threads_running(struct qeth_card *card, unsigned long threads)
}
EXPORT_SYMBOL_GPL(qeth_threads_running);
-int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
-{
- if (qeth_is_recovery_task(card))
- return 0;
- return wait_event_interruptible(card->wait_q,
- qeth_threads_running(card, threads) == 0);
-}
-EXPORT_SYMBOL_GPL(qeth_wait_for_threads);
-
void qeth_clear_working_pool_list(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *pool_entry, *tmp;
@@ -1430,7 +1404,6 @@ static void qeth_setup_card(struct qeth_card *card)
spin_lock_init(&card->thread_mask_lock);
mutex_init(&card->conf_mutex);
mutex_init(&card->discipline_mutex);
- mutex_init(&card->vid_list_mutex);
INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
INIT_LIST_HEAD(&card->cmd_waiter_list);
init_waitqueue_head(&card->wait_q);
@@ -3558,8 +3531,6 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
+ count) % QDIO_MAX_BUFFERS_PER_Q;
- netif_wake_queue(card->dev);
-
if (card->options.performance_stats) {
int delta_t = qeth_get_micros();
delta_t -= card->perf_stats.cq_start_time;
@@ -3928,7 +3899,6 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
{
struct qdio_buffer *buffer = buf->buffer;
bool is_first_elem = true;
- int flush_cnt = 0;
__skb_queue_tail(&buf->skb_list, skb);
@@ -3949,24 +3919,22 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
if (!queue->do_pack) {
QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
- /* set state to PRIMED -> will be flushed */
- atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
- flush_cnt = 1;
} else {
QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
if (queue->card->options.performance_stats)
queue->card->perf_stats.skbs_sent_pack++;
- if (buf->next_element_to_fill >=
- QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
- /*
- * packed buffer if full -> set state PRIMED
- * -> will be flushed
- */
- atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
- flush_cnt = 1;
- }
+
+ /* If the buffer still has free elements, keep using it. */
+ if (buf->next_element_to_fill <
+ QETH_MAX_BUFFER_ELEMENTS(queue->card))
+ return 0;
}
- return flush_cnt;
+
+ /* flush out the buffer */
+ atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+ queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
+ QDIO_MAX_BUFFERS_PER_Q;
+ return 1;
}
static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
@@ -3982,7 +3950,6 @@ static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
return -EBUSY;
- queue->next_buf_to_fill = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
qeth_flush_buffers(queue, index, 1);
return 0;
@@ -4040,10 +4007,9 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
}
}
}
- tmp = qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
- queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
- QDIO_MAX_BUFFERS_PER_Q;
- flush_count += tmp;
+
+ flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset,
+ hd_len);
if (flush_count)
qeth_flush_buffers(queue, start_index, flush_count);
else if (!atomic_read(&queue->set_pci_flags_count))
@@ -5154,13 +5120,6 @@ retriable:
*carrier_ok = true;
}
- if (qeth_netdev_is_registered(card->dev)) {
- if (*carrier_ok)
- netif_carrier_on(card->dev);
- else
- netif_carrier_off(card->dev);
- }
-
card->options.ipa4.supported_funcs = 0;
card->options.ipa6.supported_funcs = 0;
card->options.adp.supported_funcs = 0;
@@ -5937,9 +5896,6 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!card)
return -ENODEV;
- if (!qeth_card_hw_is_reachable(card))
- return -ENODEV;
-
if (card->info.type == QETH_CARD_TYPE_OSN)
return -EPERM;
@@ -6250,8 +6206,6 @@ int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
/* Check if we can obtain more accurate information. */
/* If QUERY_CARD_INFO command is not supported or fails, */
/* just return the heuristics that was filled above. */
- if (!qeth_card_hw_is_reachable(card))
- return -ENODEV;
rc = qeth_query_card_info(card, &carrier_info);
if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */
return 0;
@@ -6534,8 +6488,6 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}
-#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
- NETIF_F_IPV6_CSUM | NETIF_F_TSO6)
/**
* qeth_enable_hw_features() - (Re-)Enable HW functions for device features
* @dev: a net_device
@@ -6545,17 +6497,20 @@ void qeth_enable_hw_features(struct net_device *dev)
struct qeth_card *card = dev->ml_priv;
netdev_features_t features;
- rtnl_lock();
features = dev->features;
- /* force-off any feature that needs an IPA sequence.
+ /* force-off any feature that might need an IPA sequence.
* netdev_update_features() will restart them.
*/
- dev->features &= ~QETH_HW_FEATURES;
+ dev->features &= ~dev->hw_features;
+ /* toggle VLAN filter, so that VIDs are re-programmed: */
+ if (IS_LAYER2(card) && IS_VM_NIC(card)) {
+ dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
netdev_update_features(dev);
if (features != dev->features)
dev_warn(&card->gdev->dev,
"Device recovery failed to restore all offload features\n");
- rtnl_unlock();
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
@@ -6624,10 +6579,7 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
features &= ~NETIF_F_TSO;
if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
features &= ~NETIF_F_TSO6;
- /* if the card isn't up, remove features that require hw changes */
- if (card->state == CARD_STATE_DOWN ||
- card->state == CARD_STATE_RECOVER)
- features &= ~QETH_HW_FEATURES;
+
QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
return features;
}
@@ -6659,6 +6611,46 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(qeth_features_check);
+int qeth_open(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ QETH_CARD_TEXT(card, 4, "qethopen");
+ if (card->state == CARD_STATE_UP)
+ return 0;
+ if (card->state != CARD_STATE_SOFTSETUP)
+ return -ENODEV;
+
+ if (qdio_stop_irq(CARD_DDEV(card), 0) < 0)
+ return -EIO;
+
+ card->data.state = CH_STATE_UP;
+ card->state = CARD_STATE_UP;
+ netif_start_queue(dev);
+
+ napi_enable(&card->napi);
+ local_bh_disable();
+ napi_schedule(&card->napi);
+ /* kick-start the NAPI softirq: */
+ local_bh_enable();
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_open);
+
+int qeth_stop(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ QETH_CARD_TEXT(card, 4, "qethstop");
+ netif_tx_disable(dev);
+ if (card->state == CARD_STATE_UP) {
+ card->state = CARD_STATE_SOFTSETUP;
+ napi_disable(&card->napi);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_stop);
+
static int __init qeth_core_init(void)
{
int rc;
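qeth_open()/qeth_stop() consolidate the formerly duplicated L2/L3 open paths, and they preserve a subtle idiom: napi_schedule() from process context only marks the NAPI instance as scheduled, so the bottom-half toggle around it is what lets the NET_RX softirq actually run before open returns. A hedged generic sketch (struct my_priv is hypothetical):

#include <linux/bottom_half.h>
#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi;
};

static int my_open(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	netif_start_queue(dev);
	napi_enable(&priv->napi);

	local_bh_disable();
	napi_schedule(&priv->napi);	/* only raises the softirq */
	local_bh_enable();		/* ...which now runs right here */
	return 0;
}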
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 1ab321926f64..c7fb14a61eed 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -81,6 +81,7 @@ enum qeth_card_types {
#define IS_IQD(card) ((card)->info.type == QETH_CARD_TYPE_IQD)
#define IS_OSD(card) ((card)->info.type == QETH_CARD_TYPE_OSD)
+#define IS_OSM(card) ((card)->info.type == QETH_CARD_TYPE_OSM)
#define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN)
#define IS_VM_NIC(card) ((card)->info.guestlan)
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index f108d4b44605..82f50cc30b0a 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -25,7 +25,6 @@
#include "qeth_l2.h"
static int qeth_l2_set_offline(struct ccwgroup_device *);
-static int qeth_l2_stop(struct net_device *);
static void qeth_bridgeport_query_support(struct qeth_card *card);
static void qeth_bridge_state_change(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
@@ -98,8 +97,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
if (rc == 0) {
dev_info(&card->gdev->dev,
- "MAC address %pM successfully registered on device %s\n",
- mac, card->dev->name);
+ "MAC address %pM successfully registered\n", mac);
} else {
switch (rc) {
case -EEXIST:
@@ -263,75 +261,28 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
qeth_l2_send_setdelvlan_cb, NULL));
}
-static void qeth_l2_process_vlans(struct qeth_card *card)
-{
- struct qeth_vlan_vid *id;
-
- QETH_CARD_TEXT(card, 3, "L2prcvln");
- mutex_lock(&card->vid_list_mutex);
- list_for_each_entry(id, &card->vid_list, list) {
- qeth_l2_send_setdelvlan(card, id->vid, IPA_CMD_SETVLAN);
- }
- mutex_unlock(&card->vid_list_mutex);
-}
-
static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct qeth_card *card = dev->ml_priv;
- struct qeth_vlan_vid *id;
- int rc;
QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
if (!vid)
return 0;
- if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_CARD_TEXT(card, 3, "aidREC");
- return 0;
- }
- id = kmalloc(sizeof(*id), GFP_KERNEL);
- if (id) {
- id->vid = vid;
- rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
- if (rc) {
- kfree(id);
- return rc;
- }
- mutex_lock(&card->vid_list_mutex);
- list_add_tail(&id->list, &card->vid_list);
- mutex_unlock(&card->vid_list_mutex);
- } else {
- return -ENOMEM;
- }
- return 0;
+
+ return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
}
static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
- struct qeth_vlan_vid *id, *tmpid = NULL;
struct qeth_card *card = dev->ml_priv;
- int rc = 0;
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
- if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_CARD_TEXT(card, 3, "kidREC");
+ if (!vid)
return 0;
- }
- mutex_lock(&card->vid_list_mutex);
- list_for_each_entry(id, &card->vid_list, list) {
- if (id->vid == vid) {
- list_del(&id->list);
- tmpid = id;
- break;
- }
- }
- mutex_unlock(&card->vid_list_mutex);
- if (tmpid) {
- rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
- kfree(tmpid);
- }
- return rc;
+
+ return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
}
static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
@@ -343,9 +294,8 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
if (card->read.state == CH_STATE_UP &&
card->write.state == CH_STATE_UP &&
(card->state == CARD_STATE_UP)) {
- if (recovery_mode &&
- card->info.type != QETH_CARD_TYPE_OSN) {
- qeth_l2_stop(card->dev);
+ if (recovery_mode && !IS_OSN(card)) {
+ qeth_stop(card->dev);
} else {
rtnl_lock();
dev_close(card->dev);
@@ -460,6 +410,26 @@ out:
return 0;
}
+static void qeth_l2_register_dev_addr(struct qeth_card *card)
+{
+ if (!is_valid_ether_addr(card->dev->dev_addr))
+ qeth_l2_request_initial_mac(card);
+
+ if (!IS_OSN(card) && !qeth_l2_send_setmac(card, card->dev->dev_addr))
+ card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+}
+
+static int qeth_l2_validate_addr(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ if (IS_OSN(card) || (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
+ return eth_validate_addr(dev);
+
+ QETH_CARD_TEXT(card, 4, "nomacadr");
+ return -EPERM;
+}
+
static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
@@ -479,39 +449,22 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_CARD_TEXT(card, 3, "setmcREC");
- return -ERESTARTSYS;
- }
-
- /* avoid racing against concurrent state change: */
- if (!mutex_trylock(&card->conf_mutex))
- return -EAGAIN;
-
- if (!qeth_card_hw_is_reachable(card)) {
- ether_addr_copy(dev->dev_addr, addr->sa_data);
- goto out_unlock;
- }
-
/* don't register the same address twice */
if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
- goto out_unlock;
+ return 0;
/* add the new address, switch over, drop the old */
rc = qeth_l2_send_setmac(card, addr->sa_data);
if (rc)
- goto out_unlock;
+ return rc;
ether_addr_copy(old_addr, dev->dev_addr);
ether_addr_copy(dev->dev_addr, addr->sa_data);
if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
qeth_l2_remove_mac(card, old_addr);
card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
-
-out_unlock:
- mutex_unlock(&card->conf_mutex);
- return rc;
+ return 0;
}
static void qeth_promisc_to_bridge(struct qeth_card *card)
@@ -586,9 +539,6 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
return;
QETH_CARD_TEXT(card, 3, "setmulti");
- if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
- (card->state != CARD_STATE_UP))
- return;
spin_lock_bh(&card->mclock);
@@ -712,62 +662,6 @@ tx_drop:
return NETDEV_TX_OK;
}
-static int __qeth_l2_open(struct net_device *dev)
-{
- struct qeth_card *card = dev->ml_priv;
- int rc = 0;
-
- QETH_CARD_TEXT(card, 4, "qethopen");
- if (card->state == CARD_STATE_UP)
- return rc;
- if (card->state != CARD_STATE_SOFTSETUP)
- return -ENODEV;
-
- if ((card->info.type != QETH_CARD_TYPE_OSN) &&
- (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
- QETH_CARD_TEXT(card, 4, "nomacadr");
- return -EPERM;
- }
- card->data.state = CH_STATE_UP;
- card->state = CARD_STATE_UP;
- netif_start_queue(dev);
-
- if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
- napi_enable(&card->napi);
- local_bh_disable();
- napi_schedule(&card->napi);
- /* kick-start the NAPI softirq: */
- local_bh_enable();
- } else
- rc = -EIO;
- return rc;
-}
-
-static int qeth_l2_open(struct net_device *dev)
-{
- struct qeth_card *card = dev->ml_priv;
-
- QETH_CARD_TEXT(card, 5, "qethope_");
- if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_CARD_TEXT(card, 3, "openREC");
- return -ERESTARTSYS;
- }
- return __qeth_l2_open(dev);
-}
-
-static int qeth_l2_stop(struct net_device *dev)
-{
- struct qeth_card *card = dev->ml_priv;
-
- QETH_CARD_TEXT(card, 4, "qethstop");
- netif_tx_disable(dev);
- if (card->state == CARD_STATE_UP) {
- card->state = CARD_STATE_SOFTSETUP;
- napi_disable(&card->napi);
- }
- return 0;
-}
-
static const struct device_type qeth_l2_devtype = {
.name = "qeth_layer2",
.groups = qeth_l2_attr_groups,
@@ -783,7 +677,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
if (rc)
return rc;
}
- INIT_LIST_HEAD(&card->vid_list);
+
hash_init(card->mac_htable);
card->info.hwtrap = 0;
qeth_l2_vnicc_set_defaults(card);
@@ -822,12 +716,12 @@ static const struct ethtool_ops qeth_l2_osn_ops = {
};
static const struct net_device_ops qeth_l2_netdev_ops = {
- .ndo_open = qeth_l2_open,
- .ndo_stop = qeth_l2_stop,
+ .ndo_open = qeth_open,
+ .ndo_stop = qeth_stop,
.ndo_get_stats = qeth_get_stats,
.ndo_start_xmit = qeth_l2_hard_start_xmit,
.ndo_features_check = qeth_features_check,
- .ndo_validate_addr = eth_validate_addr,
+ .ndo_validate_addr = qeth_l2_validate_addr,
.ndo_set_rx_mode = qeth_l2_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
.ndo_set_mac_address = qeth_l2_set_mac_address,
@@ -842,9 +736,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
{
int rc;
- if (qeth_netdev_is_registered(card->dev))
- return 0;
-
card->dev->priv_flags |= IFF_UNICAST_FLT;
card->dev->netdev_ops = &qeth_l2_netdev_ops;
if (card->info.type == QETH_CARD_TYPE_OSN) {
@@ -855,10 +746,13 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
card->dev->needed_headroom = sizeof(struct qeth_hdr);
}
- if (card->info.type == QETH_CARD_TYPE_OSM)
+ if (IS_OSM(card)) {
card->dev->features |= NETIF_F_VLAN_CHALLENGED;
- else
+ } else {
+ if (!IS_VM_NIC(card))
+ card->dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
card->dev->features |= NETIF_F_SG;
@@ -892,8 +786,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
}
- if (!is_valid_ether_addr(card->dev->dev_addr))
- qeth_l2_request_initial_mac(card);
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
rc = register_netdev(card->dev);
if (!rc && carrier_ok)
@@ -927,6 +819,7 @@ static void qeth_l2_trace_features(struct qeth_card *card)
static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ struct net_device *dev = card->dev;
int rc = 0;
enum qeth_card_states recover_flag;
bool carrier_ok;
@@ -948,13 +841,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
dev_info(&card->gdev->dev,
"The device represents a Bridge Capable Port\n");
- rc = qeth_l2_setup_netdev(card, carrier_ok);
- if (rc)
- goto out_remove;
-
- if (card->info.type != QETH_CARD_TYPE_OSN &&
- !qeth_l2_send_setmac(card, card->dev->dev_addr))
- card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+ qeth_l2_register_dev_addr(card);
if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
if (card->info.hwtrap &&
@@ -984,11 +871,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
goto out_remove;
}
- if (card->info.type != QETH_CARD_TYPE_OSN)
- qeth_l2_process_vlans(card);
-
- netif_tx_disable(card->dev);
-
rc = qeth_init_qdio_queues(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
@@ -999,17 +881,31 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
qeth_set_allowed_threads(card, 0xffffffff, 0);
- qeth_enable_hw_features(card->dev);
- if (recover_flag == CARD_STATE_RECOVER) {
- if (recovery_mode &&
- card->info.type != QETH_CARD_TYPE_OSN) {
- __qeth_l2_open(card->dev);
- qeth_l2_set_rx_mode(card->dev);
- } else {
- rtnl_lock();
- dev_open(card->dev, NULL);
- rtnl_unlock();
+ if (!qeth_netdev_is_registered(dev)) {
+ rc = qeth_l2_setup_netdev(card, carrier_ok);
+ if (rc)
+ goto out_remove;
+ } else {
+ rtnl_lock();
+ if (carrier_ok)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ netif_device_attach(dev);
+ qeth_enable_hw_features(dev);
+
+ if (recover_flag == CARD_STATE_RECOVER) {
+ if (recovery_mode && !IS_OSN(card)) {
+ if (!qeth_l2_validate_addr(dev)) {
+ qeth_open(dev);
+ qeth_l2_set_rx_mode(dev);
+ }
+ } else {
+ dev_open(dev, NULL);
+ }
}
+ rtnl_unlock();
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
@@ -1049,7 +945,11 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
QETH_DBF_TEXT(SETUP, 3, "setoffl");
QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
+ rtnl_lock();
+ netif_device_detach(card->dev);
netif_carrier_off(card->dev);
+ rtnl_unlock();
+
recover_flag = card->state;
if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
@@ -1090,7 +990,6 @@ static int qeth_l2_recover(void *ptr)
QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
- qeth_set_recovery_task(card);
__qeth_l2_set_offline(card->gdev, 1);
rc = __qeth_l2_set_online(card->gdev, 1);
if (!rc)
@@ -1101,7 +1000,6 @@ static int qeth_l2_recover(void *ptr)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
}
- qeth_clear_recovery_task(card);
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
return 0;
@@ -1122,7 +1020,6 @@ static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- netif_device_detach(card->dev);
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (gdev->state == CCWGROUP_OFFLINE)
@@ -1152,7 +1049,6 @@ static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
rc = __qeth_l2_set_online(card->gdev, 0);
qeth_set_allowed_threads(card, 0xffffffff, 0);
- netif_device_attach(card->dev);
if (rc)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 42a7cdc59b76..59535ecb1487 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -40,7 +40,6 @@
static int qeth_l3_set_offline(struct ccwgroup_device *);
-static int qeth_l3_stop(struct net_device *);
static void qeth_l3_set_rx_mode(struct net_device *dev);
static int qeth_l3_register_addr_entry(struct qeth_card *,
struct qeth_ipaddr *);
@@ -1281,10 +1280,6 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
- if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_CARD_TEXT(card, 3, "kidREC");
- return 0;
- }
clear_bit(vid, card->active_vlans);
qeth_l3_set_rx_mode(dev);
return 0;
@@ -1410,7 +1405,7 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
card->write.state == CH_STATE_UP &&
(card->state == CARD_STATE_UP)) {
if (recovery_mode)
- qeth_l3_stop(card->dev);
+ qeth_stop(card->dev);
else {
rtnl_lock();
dev_close(card->dev);
@@ -1473,9 +1468,7 @@ static void qeth_l3_set_rx_mode(struct net_device *dev)
int i, rc;
QETH_CARD_TEXT(card, 3, "setmulti");
- if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
- (card->state != CARD_STATE_UP))
- return;
+
if (!card->options.sniffer) {
spin_lock_bh(&card->mclock);
@@ -2100,56 +2093,6 @@ tx_drop:
return NETDEV_TX_OK;
}
-static int __qeth_l3_open(struct net_device *dev)
-{
- struct qeth_card *card = dev->ml_priv;
- int rc = 0;
-
- QETH_CARD_TEXT(card, 4, "qethopen");
- if (card->state == CARD_STATE_UP)
- return rc;
- if (card->state != CARD_STATE_SOFTSETUP)
- return -ENODEV;
- card->data.state = CH_STATE_UP;
- card->state = CARD_STATE_UP;
- netif_start_queue(dev);
-
- if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
- napi_enable(&card->napi);
- local_bh_disable();
- napi_schedule(&card->napi);
- /* kick-start the NAPI softirq: */
- local_bh_enable();
- } else
- rc = -EIO;
- return rc;
-}
-
-static int qeth_l3_open(struct net_device *dev)
-{
- struct qeth_card *card = dev->ml_priv;
-
- QETH_CARD_TEXT(card, 5, "qethope_");
- if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_CARD_TEXT(card, 3, "openREC");
- return -ERESTARTSYS;
- }
- return __qeth_l3_open(dev);
-}
-
-static int qeth_l3_stop(struct net_device *dev)
-{
- struct qeth_card *card = dev->ml_priv;
-
- QETH_CARD_TEXT(card, 4, "qethstop");
- netif_tx_disable(dev);
- if (card->state == CARD_STATE_UP) {
- card->state = CARD_STATE_SOFTSETUP;
- napi_disable(&card->napi);
- }
- return 0;
-}
-
static const struct ethtool_ops qeth_l3_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_strings = qeth_core_get_strings,
@@ -2193,8 +2136,8 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
}
static const struct net_device_ops qeth_l3_netdev_ops = {
- .ndo_open = qeth_l3_open,
- .ndo_stop = qeth_l3_stop,
+ .ndo_open = qeth_open,
+ .ndo_stop = qeth_stop,
.ndo_get_stats = qeth_get_stats,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
@@ -2208,8 +2151,8 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
};
static const struct net_device_ops qeth_l3_osa_netdev_ops = {
- .ndo_open = qeth_l3_open,
- .ndo_stop = qeth_l3_stop,
+ .ndo_open = qeth_open,
+ .ndo_stop = qeth_stop,
.ndo_get_stats = qeth_get_stats,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_features_check = qeth_l3_osa_features_check,
@@ -2229,9 +2172,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
unsigned int headroom;
int rc;
- if (qeth_netdev_is_registered(card->dev))
- return 0;
-
if (card->info.type == QETH_CARD_TYPE_OSD ||
card->info.type == QETH_CARD_TYPE_OSX) {
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
@@ -2347,6 +2287,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ struct net_device *dev = card->dev;
int rc = 0;
enum qeth_card_states recover_flag;
bool carrier_ok;
@@ -2364,10 +2305,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
goto out_remove;
}
- rc = qeth_l3_setup_netdev(card, carrier_ok);
- if (rc)
- goto out_remove;
-
if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
if (card->info.hwtrap &&
qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
@@ -2397,7 +2334,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc);
}
- netif_tx_disable(card->dev);
rc = qeth_init_qdio_queues(card);
if (rc) {
@@ -2410,14 +2346,27 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
qeth_set_allowed_threads(card, 0xffffffff, 0);
qeth_l3_recover_ip(card);
- qeth_enable_hw_features(card->dev);
- if (recover_flag == CARD_STATE_RECOVER) {
+ if (!qeth_netdev_is_registered(dev)) {
+ rc = qeth_l3_setup_netdev(card, carrier_ok);
+ if (rc)
+ goto out_remove;
+ } else {
rtnl_lock();
- if (recovery_mode) {
- __qeth_l3_open(card->dev);
- qeth_l3_set_rx_mode(card->dev);
- } else {
- dev_open(card->dev, NULL);
+ if (carrier_ok)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ netif_device_attach(dev);
+ qeth_enable_hw_features(dev);
+
+ if (recover_flag == CARD_STATE_RECOVER) {
+ if (recovery_mode) {
+ qeth_open(dev);
+ qeth_l3_set_rx_mode(dev);
+ } else {
+ dev_open(dev, NULL);
+ }
}
rtnl_unlock();
}
@@ -2459,7 +2408,11 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
QETH_DBF_TEXT(SETUP, 3, "setoffl");
QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
+ rtnl_lock();
+ netif_device_detach(card->dev);
netif_carrier_off(card->dev);
+ rtnl_unlock();
+
recover_flag = card->state;
if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
@@ -2506,7 +2459,6 @@ static int qeth_l3_recover(void *ptr)
QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
- qeth_set_recovery_task(card);
__qeth_l3_set_offline(card->gdev, 1);
rc = __qeth_l3_set_online(card->gdev, 1);
if (!rc)
@@ -2517,7 +2469,6 @@ static int qeth_l3_recover(void *ptr)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
}
- qeth_clear_recovery_task(card);
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
return 0;
@@ -2527,7 +2478,6 @@ static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- netif_device_detach(card->dev);
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (gdev->state == CCWGROUP_OFFLINE)
@@ -2557,7 +2507,6 @@ static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
rc = __qeth_l3_set_online(card->gdev, 0);
qeth_set_allowed_threads(card, 0xffffffff, 0);
- netif_device_attach(card->dev);
if (rc)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e734f163bd0b..0394f1f9213b 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -268,9 +268,15 @@ struct bpf_verifier_ops {
};
struct bpf_prog_offload_ops {
+ /* verifier basic callbacks */
int (*insn_hook)(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx);
int (*finalize)(struct bpf_verifier_env *env);
+ /* verifier optimization callbacks (called after .finalize) */
+ int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
+ struct bpf_insn *insn);
+ int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
+ /* program management callbacks */
int (*prepare)(struct bpf_prog *prog);
int (*translate)(struct bpf_prog *prog);
void (*destroy)(struct bpf_prog *prog);
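The two optimization callbacks give an offload driver a chance to mirror verifier rewrites in its own instruction bookkeeping after .finalize has run. Below is a minimal sketch of what a hypothetical driver's remove_insns handler could look like; the foo_prog type and foo_forget_insns() helper are illustrative assumptions, not part of this patch:

	/* Hypothetical offload driver: drop per-insn state for a range the
	 * verifier removed as dead code.
	 */
	static int foo_remove_insns(struct bpf_verifier_env *env, u32 off,
				    u32 cnt)
	{
		struct foo_prog *fp = env->prog->aux->offload->dev_priv;

		return foo_forget_insns(fp, off, cnt);	/* hypothetical */
	}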
@@ -283,6 +289,7 @@ struct bpf_prog_offload {
void *dev_priv;
struct list_head offloads;
bool dev_state;
+ bool opt_failed;
void *jited_image;
u32 jited_len;
};
@@ -397,6 +404,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
+int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
/* an array of programs to be executed under rcu_lock.
*
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 573cca00a0e6..0620e418dde5 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -187,6 +187,7 @@ struct bpf_insn_aux_data {
int sanitize_stack_off; /* stack slot to be cleared */
bool seen; /* this insn was processed by the verifier */
u8 alu_state; /* used in combination with alu_limit */
+ unsigned int orig_idx; /* original instruction index */
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -265,5 +266,10 @@ int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
+void
+bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
+ struct bpf_insn *insn);
+void
+bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index ad106d845b22..e4b473f85b46 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -277,6 +277,26 @@ struct sock_reuseport;
.off = OFF, \
.imm = IMM })
+/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_REG(OP, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = OFF, \
+ .imm = IMM })
+
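A minimal usage sketch for the new macros: BPF_JMP32_* instructions compare only the low 32 bits of their operands, so the branch below is taken when the low word of R1 differs from -1, regardless of the upper half (register choice is illustrative):

	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		/* 32-bit compare: the upper half of R1 is ignored */
		BPF_JMP32_IMM(BPF_JNE, BPF_REG_1, -1, 1),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	};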
/* Unconditional jumps, goto pc + off16 */
#define BPF_JMP_A(OFF) \
@@ -778,6 +798,7 @@ static inline bool bpf_dump_raw_ok(void)
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
+int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);
void bpf_clear_redirect_map(struct bpf_map *map);
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
index c1f003aadcce..94e9797e434c 100644
--- a/include/linux/fsl/ptp_qoriq.h
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -120,6 +120,8 @@ struct qoriq_ptp_registers {
/* Bit definitions for the TMR_STAT register */
#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
#define STAT_VEC_MASK (0x3f)
+#define ETS1_VLD (1<<24)
+#define ETS2_VLD (1<<25)
/* Bit definitions for the TMR_PRSC register */
#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
@@ -141,6 +143,9 @@ struct qoriq_ptp {
struct ptp_clock *clock;
struct ptp_clock_info caps;
struct resource *rsrc;
+ struct dentry *debugfs_root;
+ struct device *dev;
+ bool extts_fifo_support;
int irq;
int phc_index;
u64 alarm_interval; /* for periodic alarm */
@@ -166,4 +171,14 @@ static inline void qoriq_write(unsigned __iomem *addr, u32 val)
iowrite32be(val, addr);
}
+#ifdef CONFIG_DEBUG_FS
+void ptp_qoriq_create_debugfs(struct qoriq_ptp *qoriq_ptp);
+void ptp_qoriq_remove_debugfs(struct qoriq_ptp *qoriq_ptp);
+#else
+static inline void ptp_qoriq_create_debugfs(struct qoriq_ptp *qoriq_ptp)
+{ }
+static inline void ptp_qoriq_remove_debugfs(struct qoriq_ptp *qoriq_ptp)
+{ }
+#endif
+
#endif
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 119f53941c12..8b4348f69bc5 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -18,6 +18,7 @@
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/in.h>
+#include <linux/ip.h>
#include <linux/refcount.h>
#include <uapi/linux/igmp.h>
@@ -106,6 +107,14 @@ struct ip_mc_list {
#define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value)
#define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value)
+static inline int ip_mc_may_pull(struct sk_buff *skb, unsigned int len)
+{
+ if (skb_transport_offset(skb) + ip_transport_len(skb) < len)
+ return -EINVAL;
+
+ return pskb_may_pull(skb, len);
+}
+
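ip_mc_may_pull() first rejects packets whose IP-claimed transport length is shorter than the span being parsed, then falls through to pskb_may_pull() for the usual linear-area check. A hedged caller sketch, with an illustrative helper name:

	/* Hypothetical: verify the fixed IGMP header is fully present. */
	static int igmp_hdr_present(struct sk_buff *skb)
	{
		unsigned int len = skb_transport_offset(skb) +
				   sizeof(struct igmphdr);
		int ret = ip_mc_may_pull(skb, len);

		/* -EINVAL if the IP length is too short, otherwise the
		 * boolean result of pskb_may_pull().
		 */
		if (ret <= 0)
			return ret ? ret : -EINVAL;
		return 0;
	}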
extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto);
extern int igmp_rcv(struct sk_buff *);
extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
@@ -130,6 +139,6 @@ extern void ip_mc_unmap(struct in_device *);
extern void ip_mc_remap(struct in_device *);
extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
-int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed);
+int ip_mc_check_igmp(struct sk_buff *skb);
#endif
diff --git a/include/linux/in.h b/include/linux/in.h
index 31b493734763..435e7f2a513a 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -60,6 +60,11 @@ static inline bool ipv4_is_lbcast(__be32 addr)
return addr == htonl(INADDR_BROADCAST);
}
+static inline bool ipv4_is_all_snoopers(__be32 addr)
+{
+ return addr == htonl(INADDR_ALLSNOOPERS_GROUP);
+}
+
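For reference, INADDR_ALLSNOOPERS_GROUP is 224.0.0.106, the All-Snoopers multicast group defined by RFC 4286 (Multicast Router Discovery).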
static inline bool ipv4_is_zeronet(__be32 addr)
{
return (addr & htonl(0xff000000)) == htonl(0x00000000);
diff --git a/include/linux/ip.h b/include/linux/ip.h
index 492bc6513533..482b7b7c9f30 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -34,4 +34,9 @@ static inline struct iphdr *ipip_hdr(const struct sk_buff *skb)
{
return (struct iphdr *)skb_transport_header(skb);
}
+
+static inline unsigned int ip_transport_len(const struct sk_buff *skb)
+{
+ return ntohs(ip_hdr(skb)->tot_len) - skb_network_header_len(skb);
+}
#endif /* _LINUX_IP_H */
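As a quick worked example: with a 20-byte IPv4 header and tot_len of 48, skb_network_header_len() is 20 and ip_transport_len() returns 28, the transport-layer length claimed by the IP header rather than the (possibly padded) skb length.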
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 495e834c1367..6d45ce784bea 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -104,6 +104,12 @@ static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
return (struct ipv6hdr *)skb_transport_header(skb);
}
+static inline unsigned int ipv6_transport_len(const struct sk_buff *skb)
+{
+ return ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr) -
+ skb_network_header_len(skb);
+}
+
/*
This structure contains results of exthdrs parsing
as offsets from skb->nh.
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1377d085ef99..e675ef97a426 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -630,6 +630,7 @@ struct netdev_queue {
} ____cacheline_aligned_in_smp;
extern int sysctl_fb_tunnels_only_for_init_net;
+extern int sysctl_devconf_inherit_init_net;
static inline bool net_has_fallback_tunnels(const struct net *net)
{
@@ -1152,7 +1153,8 @@ struct dev_ifalias {
*
* int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
- * const unsigned char *addr, u16 vid, u16 flags)
+ * const unsigned char *addr, u16 vid, u16 flags,
+ * struct netlink_ext_ack *extack);
* Adds an FDB entry to dev for addr.
* int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
@@ -1376,7 +1378,8 @@ struct net_device_ops {
struct net_device *dev,
const unsigned char *addr,
u16 vid,
- u16 flags);
+ u16 flags,
+ struct netlink_ext_ack *extack);
int (*ndo_fdb_del)(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
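The added extack parameter lets ndo_fdb_add implementations attach a human-readable reason to an error code. A hypothetical driver stub under the new signature (driver name and message are illustrative):

	static int foo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			       struct net_device *dev,
			       const unsigned char *addr, u16 vid, u16 flags,
			       struct netlink_ext_ack *extack)
	{
		if (vid) {
			NL_SET_ERR_MSG_MOD(extack,
					   "FDB entries with VLAN are not supported");
			return -EOPNOTSUPP;
		}
		return 0;	/* hypothetical: hardware programming elided */
	}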
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index 6989e2e4eabf..25f9a770fb84 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -19,27 +19,18 @@ struct nf_conn;
struct nf_ct_gre_keymap {
struct list_head list;
struct nf_conntrack_tuple tuple;
-};
-
-enum grep_conntrack {
- GRE_CT_UNREPLIED,
- GRE_CT_REPLIED,
- GRE_CT_MAX
-};
-
-struct netns_proto_gre {
- struct nf_proto_net nf;
- rwlock_t keymap_lock;
- struct list_head keymap_list;
- unsigned int gre_timeouts[GRE_CT_MAX];
+ struct rcu_head rcu;
};
/* add new tuple->key_reply pair to keymap */
int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
struct nf_conntrack_tuple *t);
+void nf_ct_gre_keymap_flush(struct net *net);
/* delete keymap entries */
void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
+bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
+ struct net *net, struct nf_conntrack_tuple *tuple);
#endif /* __KERNEL__ */
#endif /* _CONNTRACK_PROTO_GRE_H */
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index 95ab5cc64422..082e2c41b7ff 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -25,7 +25,6 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
bool strict);
-int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry);
#else
static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol)
@@ -37,11 +36,6 @@ static inline int nf_ip_route(struct net *net, struct dst_entry **dst,
{
return -EOPNOTSUPP;
}
-static inline int nf_ip_reroute(struct sk_buff *skb,
- const struct nf_queue_entry *entry)
-{
- return -EOPNOTSUPP;
-}
#endif /* CONFIG_INET */
#endif /*__LINUX_IP_NETFILTER_H*/
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 4e8add270200..593d1b9c33a8 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -126,6 +126,7 @@ void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
const struct netlink_ext_ack *extack);
int netlink_has_listeners(struct sock *sk, unsigned int group);
+bool netlink_strict_get_check(struct sk_buff *skb);
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
diff --git a/include/linux/phy.h b/include/linux/phy.h
index ef20aeea10cc..70f83d0d7469 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -306,11 +306,6 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
* - irq or timer will set NOLINK if link goes down
* - phy_stop moves to HALTED
*
- * CHANGELINK: PHY experienced a change in link state
- * - timer moves to RUNNING if link
- * - timer moves to NOLINK if the link is down
- * - phy_stop moves to HALTED
- *
* HALTED: PHY is up, but no polling or interrupts are done. Or
* PHY is in an error state.
*
@@ -329,7 +324,6 @@ enum phy_state {
PHY_RUNNING,
PHY_NOLINK,
PHY_FORCING,
- PHY_CHANGELINK,
PHY_RESUMING
};
@@ -959,7 +953,6 @@ int phy_aneg_done(struct phy_device *phydev);
int phy_speed_down(struct phy_device *phydev, bool sync);
int phy_speed_up(struct phy_device *phydev);
-int phy_stop_interrupts(struct phy_device *phydev);
int phy_restart_aneg(struct phy_device *phydev);
int phy_reset_after_clk_enable(struct phy_device *phydev);
@@ -1054,7 +1047,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
int phy_ethtool_ksettings_set(struct phy_device *phydev,
const struct ethtool_link_ksettings *cmd);
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
-int phy_start_interrupts(struct phy_device *phydev);
+void phy_request_interrupt(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
int phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode);
@@ -1185,4 +1178,7 @@ module_exit(phy_module_exit)
#define module_phy_driver(__phy_drivers) \
phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers))
+bool phy_driver_is_genphy(struct phy_device *phydev);
+bool phy_driver_is_genphy_10g(struct phy_device *phydev);
+
#endif /* __PHY_H */
diff --git a/include/linux/platform_data/b53.h b/include/linux/platform_data/b53.h
index 8eaef2f2b691..c3b61ead41f2 100644
--- a/include/linux/platform_data/b53.h
+++ b/include/linux/platform_data/b53.h
@@ -20,7 +20,7 @@
#define __B53_H
#include <linux/kernel.h>
-#include <net/dsa.h>
+#include <linux/platform_data/dsa.h>
struct b53_platform_data {
/* Must be first such that dsa_register_switch() can access it */
diff --git a/include/linux/platform_data/dsa.h b/include/linux/platform_data/dsa.h
new file mode 100644
index 000000000000..d4d9bf2060a6
--- /dev/null
+++ b/include/linux/platform_data/dsa.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DSA_PDATA_H
+#define __DSA_PDATA_H
+
+struct device;
+struct net_device;
+
+#define DSA_MAX_SWITCHES 4
+#define DSA_MAX_PORTS 12
+#define DSA_RTABLE_NONE -1
+
+struct dsa_chip_data {
+ /*
+ * How to access the switch configuration registers.
+ */
+ struct device *host_dev;
+ int sw_addr;
+
+ /*
+ * Reference to network devices
+ */
+ struct device *netdev[DSA_MAX_PORTS];
+
+ /* set to size of eeprom if supported by the switch */
+ int eeprom_len;
+
+ /* Device tree node pointer for this specific switch chip
+ * used during switch setup in case additional properties
+ * and resources need to be used
+ */
+ struct device_node *of_node;
+
+ /*
+ * The names of the switch's ports. Use "cpu" to
+ * designate the switch port that the cpu is connected to,
+ * "dsa" to indicate that this port is a DSA link to
+ * another switch, NULL to indicate the port is unused,
+ * or any other string to indicate this is a physical port.
+ */
+ char *port_names[DSA_MAX_PORTS];
+ struct device_node *port_dn[DSA_MAX_PORTS];
+
+ /*
+ * An array in which element [a] indicates which port on this
+ * switch should be used to send packets that are destined
+ * for switch a. Can be NULL if there is only one switch chip.
+ */
+ s8 rtable[DSA_MAX_SWITCHES];
+};
+
+struct dsa_platform_data {
+ /*
+ * Reference to a Linux network interface that connects
+ * to the root switch chip of the tree.
+ */
+ struct device *netdev;
+ struct net_device *of_netdev;
+
+ /*
+ * Info structs describing each of the switch chips
+ * connected via this network interface.
+ */
+ int nr_chips;
+ struct dsa_chip_data *chip;
+};
+
+
+#endif /* __DSA_PDATA_H */
diff --git a/include/linux/platform_data/mv88e6xxx.h b/include/linux/platform_data/mv88e6xxx.h
index f63af2955ea0..963730b44aea 100644
--- a/include/linux/platform_data/mv88e6xxx.h
+++ b/include/linux/platform_data/mv88e6xxx.h
@@ -2,7 +2,7 @@
#ifndef __DSA_MV88E6XXX_H
#define __DSA_MV88E6XXX_H
-#include <net/dsa.h>
+#include <linux/platform_data/dsa.h>
struct dsa_mv88e6xxx_pdata {
/* Must be first, such that dsa_register_switch() can access this
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 91c536a01b56..35170f74ed80 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -38,7 +38,6 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
-#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/compiler.h>
@@ -764,6 +763,7 @@ struct qed_probe_params {
u32 dp_module;
u8 dp_level;
bool is_vf;
+ bool recov_in_prog;
};
#define QED_DRV_VER_STR_SIZE 12
@@ -810,6 +810,7 @@ struct qed_common_cb_ops {
void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc);
void (*link_update)(void *dev,
struct qed_link_output *link);
+ void (*schedule_recovery_handler)(void *dev);
void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
void (*get_protocol_tlv_data)(void *dev, void *data);
@@ -1058,6 +1059,24 @@ struct qed_common_ops {
void __iomem *db_addr, void *db_data);
/**
+ * @brief recovery_process - Trigger a recovery process
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*recovery_process)(struct qed_dev *cdev);
+
+/**
+ * @brief recovery_prolog - Execute the prolog operations of a recovery process
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*recovery_prolog)(struct qed_dev *cdev);
+
+/**
* @brief update_drv_state - API to inform the change in the driver state.
*
* @param cdev
diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h
index 9904617a9730..5a00c7a473bf 100644
--- a/include/linux/qed/qede_rdma.h
+++ b/include/linux/qed/qede_rdma.h
@@ -74,21 +74,23 @@ void qede_rdma_unregister_driver(struct qedr_driver *drv);
bool qede_rdma_supported(struct qede_dev *dev);
#if IS_ENABLED(CONFIG_QED_RDMA)
-int qede_rdma_dev_add(struct qede_dev *dev);
+int qede_rdma_dev_add(struct qede_dev *dev, bool recovery);
void qede_rdma_dev_event_open(struct qede_dev *dev);
void qede_rdma_dev_event_close(struct qede_dev *dev);
-void qede_rdma_dev_remove(struct qede_dev *dev);
+void qede_rdma_dev_remove(struct qede_dev *dev, bool recovery);
void qede_rdma_event_changeaddr(struct qede_dev *edr);
#else
-static inline int qede_rdma_dev_add(struct qede_dev *dev)
+static inline int qede_rdma_dev_add(struct qede_dev *dev,
+ bool recovery)
{
return 0;
}
static inline void qede_rdma_dev_event_open(struct qede_dev *dev) {}
static inline void qede_rdma_dev_event_close(struct qede_dev *dev) {}
-static inline void qede_rdma_dev_remove(struct qede_dev *dev) {}
+static inline void qede_rdma_dev_remove(struct qede_dev *dev,
+ bool recovery) {}
static inline void qede_rdma_event_changeaddr(struct qede_dev *edr) {}
#endif
#endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 95d25b010a25..c34595374e93 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1221,6 +1221,11 @@ static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
}
#endif
+struct bpf_flow_keys;
+bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
+ const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ struct bpf_flow_keys *flow_keys);
bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 1656c5978498..20d523ee2fec 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -49,6 +49,7 @@ struct prefix_info {
struct in6_addr prefix;
};
+#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <net/if_inet6.h>
#include <net/ipv6.h>
@@ -201,6 +202,15 @@ u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr,
/*
* multicast prototypes (mcast.c)
*/
+static inline int ipv6_mc_may_pull(struct sk_buff *skb,
+ unsigned int len)
+{
+ if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len)
+ return -EINVAL;
+
+ return pskb_may_pull(skb, len);
+}
+
int ipv6_sock_mc_join(struct sock *sk, int ifindex,
const struct in6_addr *addr);
int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
@@ -219,7 +229,8 @@ void ipv6_mc_unmap(struct inet6_dev *idev);
void ipv6_mc_remap(struct inet6_dev *idev);
void ipv6_mc_init_dev(struct inet6_dev *idev);
void ipv6_mc_destroy_dev(struct inet6_dev *idev);
-int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed);
+int ipv6_mc_check_icmpv6(struct sk_buff *skb);
+int ipv6_mc_check_mld(struct sk_buff *skb);
void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp);
bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
@@ -489,6 +500,20 @@ static inline bool ipv6_addr_is_solict_mult(const struct in6_addr *addr)
#endif
}
+static inline bool ipv6_addr_is_all_snoopers(const struct in6_addr *addr)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ __be64 *p = (__be64 *)addr;
+
+ return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) |
+ (p[1] ^ cpu_to_be64(0x6a))) == 0UL;
+#else
+ return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
+ addr->s6_addr32[1] | addr->s6_addr32[2] |
+ (addr->s6_addr32[3] ^ htonl(0x0000006a))) == 0;
+#endif
+}
+
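The 64-bit fast path above is an unaligned two-word comparison against ff02::6a, the IPv6 All-Snoopers group from RFC 4286. An equivalent, slower formulation for reference; ipv6_addr_equal() is an existing helper and the constant is spelled out only for clarity:

	static const struct in6_addr all_snoopers6 = {
		.s6_addr = { 0xff, 0x02, 0, 0, 0, 0, 0, 0,
			     0, 0, 0, 0, 0, 0, 0, 0x6a },
	};

	/* Same predicate without the 64-bit fast path. */
	static inline bool is_all_snoopers_slow(const struct in6_addr *a)
	{
		return ipv6_addr_equal(a, &all_snoopers6);
	}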
#ifdef CONFIG_PROC_FS
int if6_proc_init(void);
void if6_proc_exit(void);
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index fc3111515f5c..c781e1afd683 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -180,6 +180,19 @@ struct port;
#pragma pack(8)
#endif
+struct bond_3ad_stats {
+ atomic64_t lacpdu_rx;
+ atomic64_t lacpdu_tx;
+ atomic64_t lacpdu_unknown_rx;
+ atomic64_t lacpdu_illegal_rx;
+
+ atomic64_t marker_rx;
+ atomic64_t marker_tx;
+ atomic64_t marker_resp_rx;
+ atomic64_t marker_resp_tx;
+ atomic64_t marker_unknown_rx;
+};
+
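The fields are plain atomic64_t counters, so hot-path updates need no extra locking. A minimal, hypothetical update sketch:

	/* Hypothetical: account one received LACPDU on a slave. */
	static void foo_count_lacpdu_rx(struct bond_3ad_stats *stats)
	{
		atomic64_inc(&stats->lacpdu_rx);
	}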
/* aggregator structure(43.4.5 in the 802.3ad standard) */
typedef struct aggregator {
struct mac_addr aggregator_mac_address;
@@ -265,6 +278,7 @@ struct ad_system {
struct ad_bond_info {
struct ad_system system; /* 802.3ad system structure */
+ struct bond_3ad_stats stats;
u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */
u16 aggregator_identifier;
};
@@ -272,6 +286,7 @@ struct ad_bond_info {
struct ad_slave_info {
struct aggregator aggregator; /* 802.3ad aggregator structure */
struct port port; /* 802.3ad port structure */
+ struct bond_3ad_stats stats;
u16 id;
};
@@ -307,5 +322,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
int bond_3ad_set_carrier(struct bonding *bond);
void bond_3ad_update_lacp_rate(struct bonding *bond);
void bond_3ad_update_ad_actor_settings(struct bonding *bond);
+int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats);
+size_t bond_3ad_stats_size(void);
#endif /* _NET_BOND_3AD_H */
diff --git a/include/net/dsa.h b/include/net/dsa.h
index b3eefe8e18fd..7f2a668ef2cc 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -21,6 +21,7 @@
#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
+#include <linux/platform_data/dsa.h>
#include <net/devlink.h>
#include <net/switchdev.h>
@@ -44,66 +45,6 @@ enum dsa_tag_protocol {
DSA_TAG_LAST, /* MUST BE LAST */
};
-#define DSA_MAX_SWITCHES 4
-#define DSA_MAX_PORTS 12
-
-#define DSA_RTABLE_NONE -1
-
-struct dsa_chip_data {
- /*
- * How to access the switch configuration registers.
- */
- struct device *host_dev;
- int sw_addr;
-
- /*
- * Reference to network devices
- */
- struct device *netdev[DSA_MAX_PORTS];
-
- /* set to size of eeprom if supported by the switch */
- int eeprom_len;
-
- /* Device tree node pointer for this specific switch chip
- * used during switch setup in case additional properties
- * and resources needs to be used
- */
- struct device_node *of_node;
-
- /*
- * The names of the switch's ports. Use "cpu" to
- * designate the switch port that the cpu is connected to,
- * "dsa" to indicate that this port is a DSA link to
- * another switch, NULL to indicate the port is unused,
- * or any other string to indicate this is a physical port.
- */
- char *port_names[DSA_MAX_PORTS];
- struct device_node *port_dn[DSA_MAX_PORTS];
-
- /*
- * An array of which element [a] indicates which port on this
- * switch should be used to send packets to that are destined
- * for switch a. Can be NULL if there is only one switch chip.
- */
- s8 rtable[DSA_MAX_SWITCHES];
-};
-
-struct dsa_platform_data {
- /*
- * Reference to a Linux network interface that connects
- * to the root switch chip of the tree.
- */
- struct device *netdev;
- struct net_device *of_netdev;
-
- /*
- * Info structs describing each of the switch chips
- * connected via this network interface.
- */
- int nr_chips;
- struct dsa_chip_data *chip;
-};
-
struct packet_type;
struct dsa_switch;
@@ -208,6 +149,11 @@ struct dsa_port {
* Original copy of the master netdev ethtool_ops
*/
const struct ethtool_ops *orig_ethtool_ops;
+
+ /*
+ * Original copy of the master netdev net_device_ops
+ */
+ const struct net_device_ops *orig_ndo_ops;
};
struct dsa_switch {
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 371b3b45fd5c..ff40e1d08157 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -139,8 +139,8 @@ struct inet_connection_sock {
} icsk_mtup;
u32 icsk_user_timeout;
- u64 icsk_ca_priv[88 / sizeof(u64)];
-#define ICSK_CA_PRIV_SIZE (11 * sizeof(u64))
+ u64 icsk_ca_priv[104 / sizeof(u64)];
+#define ICSK_CA_PRIV_SIZE (13 * sizeof(u64))
};
#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
@@ -314,4 +314,29 @@ int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen);
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
+
+#define TCP_PINGPONG_THRESH 3
+
+static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
+{
+ inet_csk(sk)->icsk_ack.pingpong = TCP_PINGPONG_THRESH;
+}
+
+static inline void inet_csk_exit_pingpong_mode(struct sock *sk)
+{
+ inet_csk(sk)->icsk_ack.pingpong = 0;
+}
+
+static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
+{
+ return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
+}
+
+static inline void inet_csk_inc_pingpong_cnt(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ if (icsk->icsk_ack.pingpong < U8_MAX)
+ icsk->icsk_ack.pingpong++;
+}
#endif /* _INET_CONNECTION_SOCK_H */
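A hedged sketch of the intended usage: callers go through the accessors instead of touching icsk_ack.pingpong directly, and the connection only reports pingpong (interactive) mode once the per-ACK counter reaches TCP_PINGPONG_THRESH; the call site below is illustrative:

	/* Hypothetical: count an ACK of our own data, then check whether
	 * the connection now looks interactive.
	 */
	static void foo_note_data_acked(struct sock *sk)
	{
		inet_csk_inc_pingpong_cnt(sk);	/* saturates at U8_MAX */

		if (inet_csk_in_pingpong_mode(sk))
			pr_debug("%p: pingpong (delayed-ACK) mode\n", sk);
	}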
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 1662cbc0b46b..b02bf737d019 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -77,8 +77,8 @@ struct inet_frag_queue {
struct timer_list timer;
spinlock_t lock;
refcount_t refcnt;
- struct sk_buff *fragments; /* Used in IPv6. */
- struct rb_root rb_fragments; /* Used in IPv4. */
+ struct sk_buff *fragments; /* used in 6lowpan IPv6. */
+ struct rb_root rb_fragments; /* Used in IPv4/IPv6. */
struct sk_buff *fragments_tail;
struct sk_buff *last_run_head;
ktime_t stamp;
@@ -153,4 +153,16 @@ static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
extern const u8 ip_frag_ecn_table[16];
+/* Return values of inet_frag_queue_insert() */
+#define IPFRAG_OK 0
+#define IPFRAG_DUP 1
+#define IPFRAG_OVERLAP 2
+int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+ int offset, int end);
+void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+ struct sk_buff *parent);
+void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+ void *reasm_data);
+struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
+
#endif
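A sketch of how a reassembly path might consume the shared helpers; return handling follows the IPFRAG_* values above, while queue setup, timers and the reasm_prepare/finish steps are omitted:

	/* Hypothetical insert step for a single fragment. */
	static int foo_frag_insert(struct inet_frag_queue *q,
				   struct sk_buff *skb, int offset, int end)
	{
		switch (inet_frag_queue_insert(q, skb, offset, end)) {
		case IPFRAG_OK:
			return 0;
		case IPFRAG_DUP:	/* exact duplicate, drop silently */
			kfree_skb(skb);
			return 0;
		default:		/* IPFRAG_OVERLAP */
			return -EINVAL;
		}
	}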
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 34f019650941..f069f64ebf29 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -267,7 +267,7 @@ void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id,
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
- const u8 proto);
+ const u8 proto, int tunnel_hlen);
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index a0d2e0bb9a94..047f9a5ccaad 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -453,9 +453,6 @@ struct ip_vs_protocol {
int (*dnat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);
- int (*csum_check)(int af, struct sk_buff *skb,
- struct ip_vs_protocol *pp);
-
const char *(*state_name)(int state);
void (*state_transition)(struct ip_vs_conn *cp, int direction,
diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
index 6ced1e6899b6..28aa9b30aece 100644
--- a/include/net/ipv6_frag.h
+++ b/include/net/ipv6_frag.h
@@ -82,8 +82,15 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
/* Don't send error if the first segment did not arrive. */
- head = fq->q.fragments;
- if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
+ if (!(fq->q.flags & INET_FRAG_FIRST_IN))
+ goto out;
+
+ /* sk_buff::dev and sk_buff::rbnode share a union, so pull the
+ * head out of the tree before touching head->dev.
+ */
+ head = inet_frag_pull_head(&fq->q);
+ if (!head)
goto out;
head->dev = dev;
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 99d4148e0f90..a68ced28d8f4 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -31,6 +31,7 @@
#include <net/netns/xfrm.h>
#include <net/netns/mpls.h>
#include <net/netns/can.h>
+#include <net/netns/xdp.h>
#include <linux/ns_common.h>
#include <linux/idr.h>
#include <linux/skbuff.h>
@@ -161,6 +162,9 @@ struct net {
#if IS_ENABLED(CONFIG_CAN)
struct netns_can can;
#endif
+#ifdef CONFIG_XDP_SOCKETS
+ struct netns_xdp xdp;
+#endif
struct sock *diag_nlsk;
atomic_t fnhe_genid;
} __randomize_layout;
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index 4cd56808ac4e..89808ce293c4 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -43,7 +43,6 @@ static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
}
struct net_device *setup_pre_routing(struct sk_buff *skb);
-void br_netfilter_enable(void);
#if IS_ENABLED(CONFIG_IPV6)
int br_validate_ipv6(struct net *net, struct sk_buff *skb);
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 135ee702c7b0..2c8c2b023848 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -22,5 +22,8 @@ extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp;
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite;
#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre;
+#endif
#endif /*_NF_CONNTRACK_IPV4_H*/
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 249d0a5b12b8..b5aac5ae5129 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -187,8 +187,6 @@ bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
u_int16_t l3num, struct net *net,
struct nf_conntrack_tuple *tuple);
-bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
- const struct nf_conntrack_tuple *orig);
void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
const struct sk_buff *skb,
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index afc9b3620473..ae41e92251dd 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -26,7 +26,7 @@ int nf_conntrack_init_net(struct net *net);
void nf_conntrack_cleanup_net(struct net *net);
void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list);
-int nf_conntrack_proto_pernet_init(struct net *net);
+void nf_conntrack_proto_pernet_init(struct net *net);
void nf_conntrack_proto_pernet_fini(struct net *net);
int nf_conntrack_proto_init(void);
@@ -39,8 +39,7 @@ void nf_conntrack_init_end(void);
void nf_conntrack_cleanup_end(void);
bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
- const struct nf_conntrack_tuple *orig,
- const struct nf_conntrack_l4proto *l4proto);
+ const struct nf_conntrack_tuple *orig);
/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index ae7b86f587f2..778087591983 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -27,27 +27,6 @@ struct nf_conntrack_l4proto {
/* protoinfo nlattr size, closes a hole */
u16 nlattr_size;
- /* Try to fill in the third arg: dataoff is offset past network protocol
- hdr. Return true if possible. */
- bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff,
- struct net *net, struct nf_conntrack_tuple *tuple);
-
- /* Invert the per-proto part of the tuple: ie. turn xmit into reply.
- * Only used by icmp, most protocols use a generic version.
- */
- bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
- const struct nf_conntrack_tuple *orig);
-
- /* Returns verdict for packet, or -1 for invalid. */
- int (*packet)(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state);
-
- /* Called when a conntrack entry is destroyed */
- void (*destroy)(struct nf_conn *ct);
-
/* called by gc worker if table is full */
bool (*can_early_drop)(const struct nf_conn *ct);
@@ -79,16 +58,22 @@ struct nf_conntrack_l4proto {
/* Print out the private part of the conntrack. */
void (*print_conntrack)(struct seq_file *s, struct nf_conn *);
#endif
- unsigned int *net_id;
- /* Init l4proto pernet data */
- int (*init_net)(struct net *net);
+};
- /* Return the per-net protocol part. */
- struct nf_proto_net *(*get_net_proto)(struct net *net);
+bool icmp_pkt_to_tuple(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct net *net,
+ struct nf_conntrack_tuple *tuple);
- /* Module (if any) which this is connected to. */
- struct module *me;
-};
+bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct net *net,
+ struct nf_conntrack_tuple *tuple);
+
+bool nf_conntrack_invert_icmp_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *orig);
+bool nf_conntrack_invert_icmpv6_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *orig);
int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
struct sk_buff *skb,
@@ -99,31 +84,63 @@ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
const struct nf_hook_state *state);
+
+int nf_conntrack_icmp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+
+int nf_conntrack_icmpv6_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+
+int nf_conntrack_udp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+int nf_conntrack_udplite_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+int nf_conntrack_tcp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+int nf_conntrack_dccp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+int nf_conntrack_sctp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+int nf_conntrack_gre_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+
+void nf_conntrack_generic_init_net(struct net *net);
+void nf_conntrack_tcp_init_net(struct net *net);
+void nf_conntrack_udp_init_net(struct net *net);
+void nf_conntrack_gre_init_net(struct net *net);
+void nf_conntrack_dccp_init_net(struct net *net);
+void nf_conntrack_sctp_init_net(struct net *net);
+void nf_conntrack_icmp_init_net(struct net *net);
+void nf_conntrack_icmpv6_init_net(struct net *net);
+
/* Existing built-in generic protocol */
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
#define MAX_NF_CT_PROTO IPPROTO_UDPLITE
-const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u8 l4proto);
-
-const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u8 l4proto);
-void nf_ct_l4proto_put(const struct nf_conntrack_l4proto *p);
-
-/* Protocol pernet registration. */
-int nf_ct_l4proto_pernet_register_one(struct net *net,
- const struct nf_conntrack_l4proto *proto);
-void nf_ct_l4proto_pernet_unregister_one(struct net *net,
- const struct nf_conntrack_l4proto *proto);
-int nf_ct_l4proto_pernet_register(struct net *net,
- const struct nf_conntrack_l4proto *const proto[],
- unsigned int num_proto);
-void nf_ct_l4proto_pernet_unregister(struct net *net,
- const struct nf_conntrack_l4proto *const proto[],
- unsigned int num_proto);
-
-/* Protocol global registration. */
-int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *proto);
-void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto);
+const struct nf_conntrack_l4proto *nf_ct_l4proto_find(u8 l4proto);
/* Generic netlink helpers */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
@@ -192,4 +209,11 @@ static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net)
}
#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+static inline struct nf_gre_net *nf_gre_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.gre;
+}
+#endif
+
#endif /*_NF_CONNTRACK_PROTOCOL_H*/
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index a17eb2f8d40e..8aff77cafb8b 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -47,10 +47,6 @@ extern unsigned int nf_nat_alloc_null_binding(struct nf_conn *ct,
struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct);
-/* Is this tuple already taken? (not by us)*/
-int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
- const struct nf_conn *ignored_conntrack);
-
static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct)
{
#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 841835a387e1..45eba7d7ab38 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1012,21 +1012,32 @@ int nft_verdict_dump(struct sk_buff *skb, int type,
const struct nft_verdict *v);
/**
+ * struct nft_object_hash_key - key to look up nft_object
+ *
+ * @name: name of the stateful object to look up
+ * @table: table the object belongs to
+ */
+struct nft_object_hash_key {
+ const char *name;
+ const struct nft_table *table;
+};
+
+/**
* struct nft_object - nf_tables stateful object
*
* @list: table stateful object list node
- * @table: table this object belongs to
- * @name: name of this stateful object
+ * @key: keys that identify this object
+ * @rhlhead: nft_objname_ht node
* @genmask: generation mask
* @use: number of references to this stateful object
* @handle: unique object handle
* @ops: object operations
- * @data: object data, layout depends on type
+ * @data: object data, layout depends on type
*/
struct nft_object {
struct list_head list;
- char *name;
- struct nft_table *table;
+ struct rhlist_head rhlhead;
+ struct nft_object_hash_key key;
u32 genmask:2,
use:30;
u64 handle;
@@ -1043,11 +1054,12 @@ static inline void *nft_obj_data(const struct nft_object *obj)
#define nft_expr_obj(expr) *((struct nft_object **)nft_expr_priv(expr))
-struct nft_object *nft_obj_lookup(const struct nft_table *table,
+struct nft_object *nft_obj_lookup(const struct net *net,
+ const struct nft_table *table,
const struct nlattr *nla, u32 objtype,
u8 genmask);
-void nft_obj_notify(struct net *net, struct nft_table *table,
+void nft_obj_notify(struct net *net, const struct nft_table *table,
struct nft_object *obj, u32 portid, u32 seq,
int event, int family, int report, gfp_t gfp);
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 2046d104f323..7281895fa6d9 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -80,6 +80,22 @@ struct nft_regs;
struct nft_pktinfo;
void nft_meta_get_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_cmp_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
void nft_lookup_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_payload_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_immediate_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_bitwise_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_range_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_byteorder_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_dynset_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_rt_get_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
#endif /* _NET_NF_TABLES_CORE_H */
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 51cba0b8adf5..f19b53130bf7 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -18,21 +18,11 @@
struct ctl_table_header;
struct nf_conntrack_ecache;
-struct nf_proto_net {
-#ifdef CONFIG_SYSCTL
- struct ctl_table_header *ctl_table_header;
- struct ctl_table *ctl_table;
-#endif
- unsigned int users;
-};
-
struct nf_generic_net {
- struct nf_proto_net pn;
unsigned int timeout;
};
struct nf_tcp_net {
- struct nf_proto_net pn;
unsigned int timeouts[TCP_CONNTRACK_TIMEOUT_MAX];
unsigned int tcp_loose;
unsigned int tcp_be_liberal;
@@ -46,18 +36,15 @@ enum udp_conntrack {
};
struct nf_udp_net {
- struct nf_proto_net pn;
unsigned int timeouts[UDP_CT_MAX];
};
struct nf_icmp_net {
- struct nf_proto_net pn;
unsigned int timeout;
};
#ifdef CONFIG_NF_CT_PROTO_DCCP
struct nf_dccp_net {
- struct nf_proto_net pn;
int dccp_loose;
unsigned int dccp_timeout[CT_DCCP_MAX + 1];
};
@@ -65,11 +52,23 @@ struct nf_dccp_net {
#ifdef CONFIG_NF_CT_PROTO_SCTP
struct nf_sctp_net {
- struct nf_proto_net pn;
unsigned int timeouts[SCTP_CONNTRACK_MAX];
};
#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+enum gre_conntrack {
+ GRE_CT_UNREPLIED,
+ GRE_CT_REPLIED,
+ GRE_CT_MAX
+};
+
+struct nf_gre_net {
+ struct list_head keymap_list;
+ unsigned int timeouts[GRE_CT_MAX];
+};
+#endif
+
struct nf_ip_net {
struct nf_generic_net generic;
struct nf_tcp_net tcp;
@@ -82,6 +81,9 @@ struct nf_ip_net {
#ifdef CONFIG_NF_CT_PROTO_SCTP
struct nf_sctp_net sctp;
#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ struct nf_gre_net gre;
+#endif
};
struct ct_pcpu {
diff --git a/include/net/netns/xdp.h b/include/net/netns/xdp.h
new file mode 100644
index 000000000000..e5734261ba0a
--- /dev/null
+++ b/include/net/netns/xdp.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NETNS_XDP_H__
+#define __NETNS_XDP_H__
+
+#include <linux/rculist.h>
+#include <linux/mutex.h>
+
+struct netns_xdp {
+ struct mutex lock;
+ struct hlist_head list;
+};
+
+#endif /* __NETNS_XDP_H__ */
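The new per-netns struct is just a mutex-protected list head; XDP sockets are expected to hook themselves in at bind time so the diag interface (added below in include/uapi/linux/xdp_diag.h) can walk them. A rough sketch of the registration pattern, assuming struct net gains an xdp member of this type and the caller owns an hlist_node:

/* Rough sketch: additions serialized by the mutex, readers may walk
 * the list under RCU.
 */
static void xsk_list_add(struct net *net, struct hlist_node *node)
{
	mutex_lock(&net->xdp.lock);
	hlist_add_head_rcu(node, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);
}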
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 9481f2c142e2..7a4957599874 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -580,8 +580,7 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
-void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
- unsigned int len);
+void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
void *type_data);
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index a7fdab5ee6c3..63843ae5dc81 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -195,7 +195,8 @@ int switchdev_port_obj_del(struct net_device *dev,
int register_switchdev_notifier(struct notifier_block *nb);
int unregister_switchdev_notifier(struct notifier_block *nb);
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
- struct switchdev_notifier_info *info);
+ struct switchdev_notifier_info *info,
+ struct netlink_ext_ack *extack);
int register_switchdev_blocking_notifier(struct notifier_block *nb);
int unregister_switchdev_blocking_notifier(struct notifier_block *nb);
@@ -267,7 +268,8 @@ static inline int unregister_switchdev_notifier(struct notifier_block *nb)
static inline int call_switchdev_notifiers(unsigned long val,
struct net_device *dev,
- struct switchdev_notifier_info *info)
+ struct switchdev_notifier_info *info,
+ struct netlink_ext_ack *extack)
{
return NOTIFY_DONE;
}
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e0a65c067662..a6e0355921e1 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -406,8 +406,10 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
int flags, int *addr_len);
int tcp_set_rcvlowat(struct sock *sk, int val);
void tcp_data_ready(struct sock *sk);
+#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
struct vm_area_struct *vma);
+#endif
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
struct tcp_options_received *opt_rx,
int estab, struct tcp_fastopen_cookie *foc);
@@ -1606,6 +1608,7 @@ struct tcp_fastopen_request {
struct msghdr *data; /* data in MSG_FASTOPEN */
size_t size;
int copied; /* queued in tcp_connect() */
+ struct ubuf_info *uarg;
};
void tcp_free_fastopen_req(struct tcp_sock *tp);
void tcp_fastopen_destroy_cipher(struct sock *sk);
diff --git a/include/net/tls.h b/include/net/tls.h
index 1486b60c4de8..4592606e136a 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -147,12 +147,13 @@ struct tls_sw_context_tx {
struct tls_sw_context_rx {
struct crypto_aead *aead_recv;
struct crypto_wait async_wait;
-
struct strparser strp;
+ struct sk_buff_head rx_list; /* list of decrypted 'data' records */
void (*saved_data_ready)(struct sock *sk);
struct sk_buff *recv_pkt;
u8 control;
+ int async_capable;
bool decrypted;
atomic_t decrypt_pending;
bool async_notify;
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 236403eb5ba6..09767819c3d4 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -428,7 +428,8 @@ struct switchdev_notifier_vxlan_fdb_info {
int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
struct switchdev_notifier_vxlan_fdb_info *fdb_info);
int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
- struct notifier_block *nb);
+ struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni);
#else
@@ -440,7 +441,8 @@ vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
}
static inline int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
- struct notifier_block *nb)
+ struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 13acb9803a6d..61cf7dbb6782 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -42,6 +42,7 @@ struct xdp_umem {
struct work_struct work;
struct page **pgs;
u32 npgs;
+ int id;
struct net_device *dev;
struct xdp_umem_fq_reuse *fq_reuse;
u16 queue_id;
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index a12692e5f7a8..3066ab3853a8 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -110,4 +110,6 @@
#define SO_TXTIME 61
#define SCM_TXTIME SO_TXTIME
+#define SO_BINDTOIFINDEX 62
+
#endif /* __ASM_GENERIC_SOCKET_H */
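SO_BINDTOIFINDEX is the index-based counterpart to SO_BINDTODEVICE: it takes an int interface index instead of a name, avoiding the name lookup. Typical userspace usage:

#include <net/if.h>
#include <sys/socket.h>

/* Bind a socket to an interface by name, via its index. */
int bind_to_ifindex(int fd, const char *ifname)
{
	int ifindex = if_nametoindex(ifname);

	if (!ifindex)
		return -1;
	return setsockopt(fd, SOL_SOCKET, SO_BINDTOIFINDEX,
			  &ifindex, sizeof(ifindex));
}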
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 91c43884f295..60b99b730a41 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -14,6 +14,7 @@
/* Extended instruction set based on top of classic BPF */
/* instruction classes */
+#define BPF_JMP32 0x06 /* jmp mode in word width */
#define BPF_ALU64 0x07 /* alu mode in double word width */
/* ld/ldx fields */
@@ -2540,6 +2541,7 @@ struct __sk_buff {
__bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
__u64 tstamp;
__u32 wire_len;
+ __u32 gso_segs;
};
struct bpf_tunnel_key {
diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h
index caf8dc019250..325395f56bfa 100644
--- a/include/uapi/linux/icmpv6.h
+++ b/include/uapi/linux/icmpv6.h
@@ -108,6 +108,8 @@ struct icmp6hdr {
#define ICMPV6_MOBILE_PREFIX_SOL 146
#define ICMPV6_MOBILE_PREFIX_ADV 147
+#define ICMPV6_MRDISC_ADV 151
+
/*
* Codes for Destination Unreachable
*/
diff --git a/include/uapi/linux/if_bonding.h b/include/uapi/linux/if_bonding.h
index 61a1bf6e865e..790585f0e61b 100644
--- a/include/uapi/linux/if_bonding.h
+++ b/include/uapi/linux/if_bonding.h
@@ -117,6 +117,30 @@ struct ad_info {
__u8 partner_system[ETH_ALEN];
};
+/* Embedded inside LINK_XSTATS_TYPE_BOND */
+enum {
+ BOND_XSTATS_UNSPEC,
+ BOND_XSTATS_3AD,
+ __BOND_XSTATS_MAX
+};
+#define BOND_XSTATS_MAX (__BOND_XSTATS_MAX - 1)
+
+/* Embedded inside BOND_XSTATS_3AD */
+enum {
+ BOND_3AD_STAT_LACPDU_RX,
+ BOND_3AD_STAT_LACPDU_TX,
+ BOND_3AD_STAT_LACPDU_UNKNOWN_RX,
+ BOND_3AD_STAT_LACPDU_ILLEGAL_RX,
+ BOND_3AD_STAT_MARKER_RX,
+ BOND_3AD_STAT_MARKER_TX,
+ BOND_3AD_STAT_MARKER_RESP_RX,
+ BOND_3AD_STAT_MARKER_RESP_TX,
+ BOND_3AD_STAT_MARKER_UNKNOWN_RX,
+ BOND_3AD_STAT_PAD,
+ __BOND_3AD_STAT_MAX
+};
+#define BOND_3AD_STAT_MAX (__BOND_3AD_STAT_MAX - 1)
+
#endif /* _LINUX_IF_BONDING_H */
/*
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index d6533828123a..5b225ff63b48 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -925,6 +925,7 @@ enum {
enum {
LINK_XSTATS_TYPE_UNSPEC,
LINK_XSTATS_TYPE_BRIDGE,
+ LINK_XSTATS_TYPE_BOND,
__LINK_XSTATS_TYPE_MAX
};
#define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1)
diff --git a/include/uapi/linux/igmp.h b/include/uapi/linux/igmp.h
index 7e44ac02ca18..90c28bc466c6 100644
--- a/include/uapi/linux/igmp.h
+++ b/include/uapi/linux/igmp.h
@@ -93,6 +93,7 @@ struct igmpv3_query {
#define IGMP_MTRACE_RESP 0x1e
#define IGMP_MTRACE 0x1f
+#define IGMP_MRDISC_ADV 0x30 /* From RFC4286 */
/*
* Use the BSD names for these for compatibility
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index a55cb8b10165..e7ad9d350a28 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -292,10 +292,11 @@ struct sockaddr_in {
#define IN_LOOPBACK(a) ((((long int) (a)) & 0xff000000) == 0x7f000000)
/* Defines for Multicast INADDR */
-#define INADDR_UNSPEC_GROUP 0xe0000000U /* 224.0.0.0 */
-#define INADDR_ALLHOSTS_GROUP 0xe0000001U /* 224.0.0.1 */
-#define INADDR_ALLRTRS_GROUP 0xe0000002U /* 224.0.0.2 */
-#define INADDR_MAX_LOCAL_GROUP 0xe00000ffU /* 224.0.0.255 */
+#define INADDR_UNSPEC_GROUP 0xe0000000U /* 224.0.0.0 */
+#define INADDR_ALLHOSTS_GROUP 0xe0000001U /* 224.0.0.1 */
+#define INADDR_ALLRTRS_GROUP 0xe0000002U /* 224.0.0.2 */
+#define INADDR_ALLSNOOPERS_GROUP 0xe000006aU /* 224.0.0.106 */
+#define INADDR_MAX_LOCAL_GROUP 0xe00000ffU /* 224.0.0.255 */
#endif
/* <asm/byteorder.h> contains the htonl type stuff.. */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 7de4f1bdaf06..030302893d96 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -219,6 +219,7 @@ enum nft_chain_attributes {
* @NFTA_RULE_POSITION: numeric handle of the previous rule (NLA_U64)
* @NFTA_RULE_USERDATA: user data (NLA_BINARY, NFT_USERDATA_MAXLEN)
* @NFTA_RULE_ID: uniquely identifies a rule in a transaction (NLA_U32)
+ * @NFTA_RULE_POSITION_ID: transaction unique identifier of the previous rule (NLA_U32)
*/
enum nft_rule_attributes {
NFTA_RULE_UNSPEC,
@@ -231,6 +232,7 @@ enum nft_rule_attributes {
NFTA_RULE_USERDATA,
NFTA_RULE_PAD,
NFTA_RULE_ID,
+ NFTA_RULE_POSITION_ID,
__NFTA_RULE_MAX
};
#define NFTA_RULE_MAX (__NFTA_RULE_MAX - 1)
@@ -789,6 +791,8 @@ enum nft_exthdr_attributes {
* @NFT_META_CGROUP: socket control group (skb->sk->sk_classid)
* @NFT_META_PRANDOM: a 32bit pseudo-random number
* @NFT_META_SECPATH: boolean, secpath_exists (!!skb->sp)
+ * @NFT_META_IIFKIND: packet input interface kind name (dev->rtnl_link_ops->kind)
+ * @NFT_META_OIFKIND: packet output interface kind name (dev->rtnl_link_ops->kind)
*/
enum nft_meta_keys {
NFT_META_LEN,
@@ -817,6 +821,8 @@ enum nft_meta_keys {
NFT_META_CGROUP,
NFT_META_PRANDOM,
NFT_META_SECPATH,
+ NFT_META_IIFKIND,
+ NFT_META_OIFKIND,
};
/**
@@ -871,8 +877,8 @@ enum nft_hash_attributes {
NFTA_HASH_SEED,
NFTA_HASH_OFFSET,
NFTA_HASH_TYPE,
- NFTA_HASH_SET_NAME,
- NFTA_HASH_SET_ID,
+ NFTA_HASH_SET_NAME, /* deprecated */
+ NFTA_HASH_SET_ID, /* deprecated */
__NFTA_HASH_MAX,
};
#define NFTA_HASH_MAX (__NFTA_HASH_MAX - 1)
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 95d0db2a8350..02ac251be8c4 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -333,12 +333,19 @@ enum {
/* Basic filter */
+struct tc_basic_pcnt {
+ __u64 rcnt;
+ __u64 rhit;
+};
+
enum {
TCA_BASIC_UNSPEC,
TCA_BASIC_CLASSID,
TCA_BASIC_EMATCHES,
TCA_BASIC_ACT,
TCA_BASIC_POLICE,
+ TCA_BASIC_PCNT,
+ TCA_BASIC_PAD,
__TCA_BASIC_MAX
};
@@ -527,11 +534,17 @@ enum {
/* Match-all classifier */
+struct tc_matchall_pcnt {
+ __u64 rhit;
+};
+
enum {
TCA_MATCHALL_UNSPEC,
TCA_MATCHALL_CLASSID,
TCA_MATCHALL_ACT,
TCA_MATCHALL_FLAGS,
+ TCA_MATCHALL_PCNT,
+ TCA_MATCHALL_PAD,
__TCA_MATCHALL_MAX,
};
diff --git a/include/uapi/linux/xdp_diag.h b/include/uapi/linux/xdp_diag.h
new file mode 100644
index 000000000000..78b2591a7782
--- /dev/null
+++ b/include/uapi/linux/xdp_diag.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * xdp_diag: interface for querying/monitoring XDP sockets
+ * Copyright(c) 2019 Intel Corporation.
+ */
+
+#ifndef _LINUX_XDP_DIAG_H
+#define _LINUX_XDP_DIAG_H
+
+#include <linux/types.h>
+
+struct xdp_diag_req {
+ __u8 sdiag_family;
+ __u8 sdiag_protocol;
+ __u16 pad;
+ __u32 xdiag_ino;
+ __u32 xdiag_show;
+ __u32 xdiag_cookie[2];
+};
+
+struct xdp_diag_msg {
+ __u8 xdiag_family;
+ __u8 xdiag_type;
+ __u16 pad;
+ __u32 xdiag_ino;
+ __u32 xdiag_cookie[2];
+};
+
+#define XDP_SHOW_INFO (1 << 0) /* Basic information */
+#define XDP_SHOW_RING_CFG (1 << 1)
+#define XDP_SHOW_UMEM (1 << 2)
+#define XDP_SHOW_MEMINFO (1 << 3)
+
+enum {
+ XDP_DIAG_NONE,
+ XDP_DIAG_INFO,
+ XDP_DIAG_UID,
+ XDP_DIAG_RX_RING,
+ XDP_DIAG_TX_RING,
+ XDP_DIAG_UMEM,
+ XDP_DIAG_UMEM_FILL_RING,
+ XDP_DIAG_UMEM_COMPLETION_RING,
+ XDP_DIAG_MEMINFO,
+ __XDP_DIAG_MAX,
+};
+
+#define XDP_DIAG_MAX (__XDP_DIAG_MAX - 1)
+
+struct xdp_diag_info {
+ __u32 ifindex;
+ __u32 queue_id;
+};
+
+struct xdp_diag_ring {
+ __u32 entries; /* number of descriptors */
+};
+
+#define XDP_DU_F_ZEROCOPY (1 << 0)
+
+struct xdp_diag_umem {
+ __u64 size;
+ __u32 id;
+ __u32 num_pages;
+ __u32 chunk_size;
+ __u32 headroom;
+ __u32 ifindex;
+ __u32 queue_id;
+ __u32 flags;
+ __u32 refs;
+};
+
+#endif /* _LINUX_XDP_DIAG_H */
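Queries follow the usual sock_diag pattern: a NETLINK_SOCK_DIAG dump request whose family selects AF_XDP. A hedged userspace sketch (reply parsing elided; AF_XDP is assumed to be 44 where libc headers lack it):

#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/xdp_diag.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_XDP
#define AF_XDP 44	/* assumed value where headers lack it */
#endif

static int xdp_diag_dump(void)
{
	struct {
		struct nlmsghdr nlh;
		struct xdp_diag_req req;
	} msg = {
		.nlh = {
			.nlmsg_len = sizeof(msg),
			.nlmsg_type = SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.sdiag_family = AF_XDP,
			.xdiag_show = XDP_SHOW_INFO | XDP_SHOW_RING_CFG,
		},
	};
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

	if (fd < 0)
		return -1;
	if (send(fd, &msg, sizeof(msg), 0) < 0) {
		close(fd);
		return -1;
	}
	/* recv() loop parsing xdp_diag_msg + attributes would follow */
	close(fd);
	return 0;
}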
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index befe570be5ba..3d661f0606fe 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -157,7 +157,7 @@
*
*/
-#define BITS_PER_U64 (sizeof(u64) * BITS_PER_BYTE)
+#define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
@@ -525,7 +525,7 @@ const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
/*
* Regular int is not a bit field and it must be either
- * u8/u16/u32/u64.
+ * u8/u16/u32/u64 or __int128.
*/
static bool btf_type_int_is_regular(const struct btf_type *t)
{
@@ -538,7 +538,8 @@ static bool btf_type_int_is_regular(const struct btf_type *t)
if (BITS_PER_BYTE_MASKED(nr_bits) ||
BTF_INT_OFFSET(int_data) ||
(nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
- nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64))) {
+ nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
+ nr_bytes != (2 * sizeof(u64)))) {
return false;
}
@@ -1063,9 +1064,9 @@ static int btf_int_check_member(struct btf_verifier_env *env,
nr_copy_bits = BTF_INT_BITS(int_data) +
BITS_PER_BYTE_MASKED(struct_bits_off);
- if (nr_copy_bits > BITS_PER_U64) {
+ if (nr_copy_bits > BITS_PER_U128) {
btf_verifier_log_member(env, struct_type, member,
- "nr_copy_bits exceeds 64");
+ "nr_copy_bits exceeds 128");
return -EINVAL;
}
@@ -1119,9 +1120,9 @@ static int btf_int_check_kflag_member(struct btf_verifier_env *env,
bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
- if (nr_copy_bits > BITS_PER_U64) {
+ if (nr_copy_bits > BITS_PER_U128) {
btf_verifier_log_member(env, struct_type, member,
- "nr_copy_bits exceeds 64");
+ "nr_copy_bits exceeds 128");
return -EINVAL;
}
@@ -1168,9 +1169,9 @@ static s32 btf_int_check_meta(struct btf_verifier_env *env,
nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
- if (nr_bits > BITS_PER_U64) {
+ if (nr_bits > BITS_PER_U128) {
btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
- BITS_PER_U64);
+ BITS_PER_U128);
return -EINVAL;
}
@@ -1211,31 +1212,93 @@ static void btf_int_log(struct btf_verifier_env *env,
btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
}
+static void btf_int128_print(struct seq_file *m, void *data)
+{
+ /* data points to a __int128 number.
+ * Suppose
+ * int128_num = *(__int128 *)data;
+ * The formulas below show what upper_num and lower_num represent:
+ * upper_num = int128_num >> 64;
+ * lower_num = int128_num & 0xffffffffFFFFFFFFULL;
+ */
+ u64 upper_num, lower_num;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ upper_num = *(u64 *)data;
+ lower_num = *(u64 *)(data + 8);
+#else
+ upper_num = *(u64 *)(data + 8);
+ lower_num = *(u64 *)data;
+#endif
+ if (upper_num == 0)
+ seq_printf(m, "0x%llx", lower_num);
+ else
+ seq_printf(m, "0x%llx%016llx", upper_num, lower_num);
+}
+
+static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
+ u16 right_shift_bits)
+{
+ u64 upper_num, lower_num;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ upper_num = print_num[0];
+ lower_num = print_num[1];
+#else
+ upper_num = print_num[1];
+ lower_num = print_num[0];
+#endif
+
+ /* shake out unneeded bits by shift/or operations */
+ if (left_shift_bits >= 64) {
+ upper_num = lower_num << (left_shift_bits - 64);
+ lower_num = 0;
+ } else {
+ upper_num = (upper_num << left_shift_bits) |
+ (lower_num >> (64 - left_shift_bits));
+ lower_num = lower_num << left_shift_bits;
+ }
+
+ if (right_shift_bits >= 64) {
+ lower_num = upper_num >> (right_shift_bits - 64);
+ upper_num = 0;
+ } else {
+ lower_num = (lower_num >> right_shift_bits) |
+ (upper_num << (64 - right_shift_bits));
+ upper_num = upper_num >> right_shift_bits;
+ }
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ print_num[0] = upper_num;
+ print_num[1] = lower_num;
+#else
+ print_num[0] = lower_num;
+ print_num[1] = upper_num;
+#endif
+}
+
static void btf_bitfield_seq_show(void *data, u8 bits_offset,
u8 nr_bits, struct seq_file *m)
{
u16 left_shift_bits, right_shift_bits;
u8 nr_copy_bytes;
u8 nr_copy_bits;
- u64 print_num;
+ u64 print_num[2] = {};
nr_copy_bits = nr_bits + bits_offset;
nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
- print_num = 0;
- memcpy(&print_num, data, nr_copy_bytes);
+ memcpy(print_num, data, nr_copy_bytes);
#ifdef __BIG_ENDIAN_BITFIELD
left_shift_bits = bits_offset;
#else
- left_shift_bits = BITS_PER_U64 - nr_copy_bits;
+ left_shift_bits = BITS_PER_U128 - nr_copy_bits;
#endif
- right_shift_bits = BITS_PER_U64 - nr_bits;
-
- print_num <<= left_shift_bits;
- print_num >>= right_shift_bits;
+ right_shift_bits = BITS_PER_U128 - nr_bits;
- seq_printf(m, "0x%llx", print_num);
+ btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
+ btf_int128_print(m, print_num);
}
@@ -1250,7 +1313,7 @@ static void btf_int_bits_seq_show(const struct btf *btf,
/*
* bits_offset is at most 7.
- * BTF_INT_OFFSET() cannot exceed 64 bits.
+ * BTF_INT_OFFSET() cannot exceed 128 bits.
*/
total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
@@ -1274,6 +1337,9 @@ static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
}
switch (nr_bits) {
+ case 128:
+ btf_int128_print(m, data);
+ break;
case 64:
if (sign)
seq_printf(m, "%lld", *(s64 *)data);
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index f908b9356025..a7bcb23bee84 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -307,15 +307,16 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
return 0;
}
-static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
- u32 curr, const bool probe_pass)
+static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
+ s32 end_new, u32 curr, const bool probe_pass)
{
const s64 imm_min = S32_MIN, imm_max = S32_MAX;
+ s32 delta = end_new - end_old;
s64 imm = insn->imm;
- if (curr < pos && curr + imm + 1 > pos)
+ if (curr < pos && curr + imm + 1 >= end_old)
imm += delta;
- else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
+ else if (curr >= end_new && curr + imm + 1 < end_new)
imm -= delta;
if (imm < imm_min || imm > imm_max)
return -ERANGE;
@@ -324,15 +325,16 @@ static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
return 0;
}
-static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
- u32 curr, const bool probe_pass)
+static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
+ s32 end_new, u32 curr, const bool probe_pass)
{
const s32 off_min = S16_MIN, off_max = S16_MAX;
+ s32 delta = end_new - end_old;
s32 off = insn->off;
- if (curr < pos && curr + off + 1 > pos)
+ if (curr < pos && curr + off + 1 >= end_old)
off += delta;
- else if (curr > pos + delta && curr + off + 1 <= pos + delta)
+ else if (curr >= end_new && curr + off + 1 < end_new)
off -= delta;
if (off < off_min || off > off_max)
return -ERANGE;
@@ -341,10 +343,10 @@ static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
return 0;
}
-static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
- const bool probe_pass)
+static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
+ s32 end_new, const bool probe_pass)
{
- u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
+ u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
struct bpf_insn *insn = prog->insnsi;
int ret = 0;
@@ -356,22 +358,23 @@ static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
* do any other adjustments. Therefore skip the patchlet.
*/
if (probe_pass && i == pos) {
- i += delta + 1;
- insn++;
+ i = end_new;
+ insn = prog->insnsi + end_old;
}
code = insn->code;
- if (BPF_CLASS(code) != BPF_JMP ||
+ if ((BPF_CLASS(code) != BPF_JMP &&
+ BPF_CLASS(code) != BPF_JMP32) ||
BPF_OP(code) == BPF_EXIT)
continue;
/* Adjust offset of jmps if we cross patch boundaries. */
if (BPF_OP(code) == BPF_CALL) {
if (insn->src_reg != BPF_PSEUDO_CALL)
continue;
- ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
- probe_pass);
+ ret = bpf_adj_delta_to_imm(insn, pos, end_old,
+ end_new, i, probe_pass);
} else {
- ret = bpf_adj_delta_to_off(insn, pos, delta, i,
- probe_pass);
+ ret = bpf_adj_delta_to_off(insn, pos, end_old,
+ end_new, i, probe_pass);
}
if (ret)
break;
@@ -421,7 +424,7 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
* we afterwards may not fail anymore.
*/
if (insn_adj_cnt > cnt_max &&
- bpf_adj_branches(prog, off, insn_delta, true))
+ bpf_adj_branches(prog, off, off + 1, off + len, true))
return NULL;
/* Several new instructions need to be inserted. Make room
@@ -453,13 +456,25 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
* the ship has sailed to reverse to the original state. An
* overflow cannot happen at this point.
*/
- BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));
+ BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
bpf_adj_linfo(prog_adj, off, insn_delta);
return prog_adj;
}
+int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
+{
+ /* Branch offsets can't overflow when program is shrinking, no need
+ * to call bpf_adj_branches(..., true) here
+ */
+ memmove(prog->insnsi + off, prog->insnsi + off + cnt,
+ sizeof(struct bpf_insn) * (prog->len - off - cnt));
+ prog->len -= cnt;
+
+ return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
+}
+
void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
int i;
@@ -934,6 +949,27 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
break;
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ /* Account for the extra offset in case of a backjump. */
+ off = from->off;
+ if (off < 0)
+ off -= 2;
+ *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
+ *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+ *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
+ off);
+ break;
+
case BPF_LD | BPF_IMM | BPF_DW:
*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
@@ -1130,6 +1166,31 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
INSN_2(JMP, CALL), \
/* Exit instruction. */ \
INSN_2(JMP, EXIT), \
+ /* 32-bit Jump instructions. */ \
+ /* Register based. */ \
+ INSN_3(JMP32, JEQ, X), \
+ INSN_3(JMP32, JNE, X), \
+ INSN_3(JMP32, JGT, X), \
+ INSN_3(JMP32, JLT, X), \
+ INSN_3(JMP32, JGE, X), \
+ INSN_3(JMP32, JLE, X), \
+ INSN_3(JMP32, JSGT, X), \
+ INSN_3(JMP32, JSLT, X), \
+ INSN_3(JMP32, JSGE, X), \
+ INSN_3(JMP32, JSLE, X), \
+ INSN_3(JMP32, JSET, X), \
+ /* Immediate based. */ \
+ INSN_3(JMP32, JEQ, K), \
+ INSN_3(JMP32, JNE, K), \
+ INSN_3(JMP32, JGT, K), \
+ INSN_3(JMP32, JLT, K), \
+ INSN_3(JMP32, JGE, K), \
+ INSN_3(JMP32, JLE, K), \
+ INSN_3(JMP32, JSGT, K), \
+ INSN_3(JMP32, JSLT, K), \
+ INSN_3(JMP32, JSGE, K), \
+ INSN_3(JMP32, JSLE, K), \
+ INSN_3(JMP32, JSET, K), \
/* Jump instructions. */ \
/* Register based. */ \
INSN_3(JMP, JEQ, X), \
@@ -1390,145 +1451,49 @@ select_insn:
out:
CONT;
}
- /* JMP */
JMP_JA:
insn += insn->off;
CONT;
- JMP_JEQ_X:
- if (DST == SRC) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JEQ_K:
- if (DST == IMM) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JNE_X:
- if (DST != SRC) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JNE_K:
- if (DST != IMM) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JGT_X:
- if (DST > SRC) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JGT_K:
- if (DST > IMM) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JLT_X:
- if (DST < SRC) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JLT_K:
- if (DST < IMM) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JGE_X:
- if (DST >= SRC) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JGE_K:
- if (DST >= IMM) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JLE_X:
- if (DST <= SRC) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JLE_K:
- if (DST <= IMM) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSGT_X:
- if (((s64) DST) > ((s64) SRC)) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSGT_K:
- if (((s64) DST) > ((s64) IMM)) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSLT_X:
- if (((s64) DST) < ((s64) SRC)) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSLT_K:
- if (((s64) DST) < ((s64) IMM)) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSGE_X:
- if (((s64) DST) >= ((s64) SRC)) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSGE_K:
- if (((s64) DST) >= ((s64) IMM)) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSLE_X:
- if (((s64) DST) <= ((s64) SRC)) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSLE_K:
- if (((s64) DST) <= ((s64) IMM)) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSET_X:
- if (DST & SRC) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSET_K:
- if (DST & IMM) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
JMP_EXIT:
return BPF_R0;
-
+ /* JMP */
+#define COND_JMP(SIGN, OPCODE, CMP_OP) \
+ JMP_##OPCODE##_X: \
+ if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
+ insn += insn->off; \
+ CONT_JMP; \
+ } \
+ CONT; \
+ JMP32_##OPCODE##_X: \
+ if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
+ insn += insn->off; \
+ CONT_JMP; \
+ } \
+ CONT; \
+ JMP_##OPCODE##_K: \
+ if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
+ insn += insn->off; \
+ CONT_JMP; \
+ } \
+ CONT; \
+ JMP32_##OPCODE##_K: \
+ if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
+ insn += insn->off; \
+ CONT_JMP; \
+ } \
+ CONT;
+ COND_JMP(u, JEQ, ==)
+ COND_JMP(u, JNE, !=)
+ COND_JMP(u, JGT, >)
+ COND_JMP(u, JLT, <)
+ COND_JMP(u, JGE, >=)
+ COND_JMP(u, JLE, <=)
+ COND_JMP(u, JSET, &)
+ COND_JMP(s, JSGT, >)
+ COND_JMP(s, JSLT, <)
+ COND_JMP(s, JSGE, >=)
+ COND_JMP(s, JSLE, <=)
+#undef COND_JMP
/* STX and ST and LDX*/
#define LDST(SIZEOP, SIZE) \
STX_MEM_##SIZEOP: \
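The COND_JMP macro gives every conditional opcode both a 64-bit (JMP) and a 32-bit (JMP32) flavor; the only difference is the cast width applied to DST/SRC/IMM. A tiny host-side demonstration of why the two widths can disagree on the same operands:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dst = 0xffffffff00000001ULL;
	uint64_t src = 2;

	/* BPF_JMP | BPF_JGT | BPF_X: full-width compare -> taken */
	printf("JMP   JGT: %d\n", dst > src);
	/* BPF_JMP32 | BPF_JGT | BPF_X: low 32 bits only -> not taken */
	printf("JMP32 JGT: %d\n", (uint32_t)dst > (uint32_t)src);
	return 0;
}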
diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index d6b76377cb6e..de73f55e42fd 100644
--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -67,7 +67,7 @@ const char *const bpf_class_string[8] = {
[BPF_STX] = "stx",
[BPF_ALU] = "alu",
[BPF_JMP] = "jmp",
- [BPF_RET] = "BUG",
+ [BPF_JMP32] = "jmp32",
[BPF_ALU64] = "alu64",
};
@@ -136,23 +136,22 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
else
print_bpf_end_insn(verbose, cbs->private_data, insn);
} else if (BPF_OP(insn->code) == BPF_NEG) {
- verbose(cbs->private_data, "(%02x) r%d = %s-r%d\n",
- insn->code, insn->dst_reg,
- class == BPF_ALU ? "(u32) " : "",
+ verbose(cbs->private_data, "(%02x) %c%d = -%c%d\n",
+ insn->code, class == BPF_ALU ? 'w' : 'r',
+ insn->dst_reg, class == BPF_ALU ? 'w' : 'r',
insn->dst_reg);
} else if (BPF_SRC(insn->code) == BPF_X) {
- verbose(cbs->private_data, "(%02x) %sr%d %s %sr%d\n",
- insn->code, class == BPF_ALU ? "(u32) " : "",
+ verbose(cbs->private_data, "(%02x) %c%d %s %c%d\n",
+ insn->code, class == BPF_ALU ? 'w' : 'r',
insn->dst_reg,
bpf_alu_string[BPF_OP(insn->code) >> 4],
- class == BPF_ALU ? "(u32) " : "",
+ class == BPF_ALU ? 'w' : 'r',
insn->src_reg);
} else {
- verbose(cbs->private_data, "(%02x) %sr%d %s %s%d\n",
- insn->code, class == BPF_ALU ? "(u32) " : "",
+ verbose(cbs->private_data, "(%02x) %c%d %s %d\n",
+ insn->code, class == BPF_ALU ? 'w' : 'r',
insn->dst_reg,
bpf_alu_string[BPF_OP(insn->code) >> 4],
- class == BPF_ALU ? "(u32) " : "",
insn->imm);
}
} else if (class == BPF_STX) {
@@ -220,7 +219,7 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
verbose(cbs->private_data, "BUG_ld_%02x\n", insn->code);
return;
}
- } else if (class == BPF_JMP) {
+ } else if (class == BPF_JMP32 || class == BPF_JMP) {
u8 opcode = BPF_OP(insn->code);
if (opcode == BPF_CALL) {
@@ -244,13 +243,18 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
} else if (insn->code == (BPF_JMP | BPF_EXIT)) {
verbose(cbs->private_data, "(%02x) exit\n", insn->code);
} else if (BPF_SRC(insn->code) == BPF_X) {
- verbose(cbs->private_data, "(%02x) if r%d %s r%d goto pc%+d\n",
- insn->code, insn->dst_reg,
+ verbose(cbs->private_data,
+ "(%02x) if %c%d %s %c%d goto pc%+d\n",
+ insn->code, class == BPF_JMP32 ? 'w' : 'r',
+ insn->dst_reg,
bpf_jmp_string[BPF_OP(insn->code) >> 4],
+ class == BPF_JMP32 ? 'w' : 'r',
insn->src_reg, insn->off);
} else {
- verbose(cbs->private_data, "(%02x) if r%d %s 0x%x goto pc%+d\n",
- insn->code, insn->dst_reg,
+ verbose(cbs->private_data,
+ "(%02x) if %c%d %s 0x%x goto pc%+d\n",
+ insn->code, class == BPF_JMP32 ? 'w' : 'r',
+ insn->dst_reg,
bpf_jmp_string[BPF_OP(insn->code) >> 4],
insn->imm, insn->off);
}
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 54cf2b9c44a4..39dba8c90331 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -173,6 +173,41 @@ int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
return ret;
}
+void
+bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
+ struct bpf_insn *insn)
+{
+ const struct bpf_prog_offload_ops *ops;
+ struct bpf_prog_offload *offload;
+ int ret = -EOPNOTSUPP;
+
+ down_read(&bpf_devs_lock);
+ offload = env->prog->aux->offload;
+ if (offload) {
+ ops = offload->offdev->ops;
+ if (!offload->opt_failed && ops->replace_insn)
+ ret = ops->replace_insn(env, off, insn);
+ offload->opt_failed |= ret;
+ }
+ up_read(&bpf_devs_lock);
+}
+
+void
+bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
+{
+ struct bpf_prog_offload *offload;
+ int ret = -EOPNOTSUPP;
+
+ down_read(&bpf_devs_lock);
+ offload = env->prog->aux->offload;
+ if (offload) {
+ if (!offload->opt_failed && offload->offdev->ops->remove_insns)
+ ret = offload->offdev->ops->remove_insns(env, off, cnt);
+ offload->opt_failed |= ret;
+ }
+ up_read(&bpf_devs_lock);
+}
+
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
struct bpf_prog_offload *offload = prog->aux->offload;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 56674a7c3778..8c1c21cd50b4 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1095,7 +1095,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
for (i = 0; i < insn_cnt; i++) {
u8 code = insn[i].code;
- if (BPF_CLASS(code) != BPF_JMP)
+ if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
goto next;
if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
goto next;
@@ -4031,11 +4031,50 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
 * 0 - branch will not be taken and falls through to the next insn
* -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10]
*/
-static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
+static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
+ bool is_jmp32)
{
+ struct bpf_reg_state reg_lo;
+ s64 sval;
+
if (__is_pointer_value(false, reg))
return -1;
+ if (is_jmp32) {
+ reg_lo = *reg;
+ reg = &reg_lo;
+ /* For JMP32, only the low 32 bits are compared; coerce_reg_to_size()
+ * can truncate the high bits and update umin/umax based on the
+ * information in the low bits.
+ */
+ coerce_reg_to_size(reg, 4);
+ /* smin/smax need special handling. For example, after coercion,
+ * if smin_value is 0x00000000ffffffffLL, the value is -1 when
+ * used as an operand to JMP32. It is a negative number from the
+ * s32 point of view, while it is a positive number when seen as
+ * s64. The smin/smax are kept as s64; therefore, when used with
+ * JMP32, they need to be transformed into s32, then sign
+ * extended back to s64.
+ *
+ * Also, smin/smax were copied from umin/umax. If umin and umax
+ * have different sign bits, the min/max relationship no longer
+ * holds after casting into s32; in that case, set smin/smax to
+ * the safest range.
+ */
+ if ((reg->umax_value ^ reg->umin_value) &
+ (1ULL << 31)) {
+ reg->smin_value = S32_MIN;
+ reg->smax_value = S32_MAX;
+ }
+ reg->smin_value = (s64)(s32)reg->smin_value;
+ reg->smax_value = (s64)(s32)reg->smax_value;
+
+ val = (u32)val;
+ sval = (s64)(s32)val;
+ } else {
+ sval = (s64)val;
+ }
+
switch (opcode) {
case BPF_JEQ:
if (tnum_is_const(reg->var_off))
@@ -4058,9 +4097,9 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
return 0;
break;
case BPF_JSGT:
- if (reg->smin_value > (s64)val)
+ if (reg->smin_value > sval)
return 1;
- else if (reg->smax_value < (s64)val)
+ else if (reg->smax_value < sval)
return 0;
break;
case BPF_JLT:
@@ -4070,9 +4109,9 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
return 0;
break;
case BPF_JSLT:
- if (reg->smax_value < (s64)val)
+ if (reg->smax_value < sval)
return 1;
- else if (reg->smin_value >= (s64)val)
+ else if (reg->smin_value >= sval)
return 0;
break;
case BPF_JGE:
@@ -4082,9 +4121,9 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
return 0;
break;
case BPF_JSGE:
- if (reg->smin_value >= (s64)val)
+ if (reg->smin_value >= sval)
return 1;
- else if (reg->smax_value < (s64)val)
+ else if (reg->smax_value < sval)
return 0;
break;
case BPF_JLE:
@@ -4094,9 +4133,9 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
return 0;
break;
case BPF_JSLE:
- if (reg->smax_value <= (s64)val)
+ if (reg->smax_value <= sval)
return 1;
- else if (reg->smin_value > (s64)val)
+ else if (reg->smin_value > sval)
return 0;
break;
}
@@ -4104,6 +4143,29 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
return -1;
}
+/* Generate the min value of the high 32 bits from TNUM info. */
+static u64 gen_hi_min(struct tnum var)
+{
+ return var.value & ~0xffffffffULL;
+}
+
+/* Generate the max value of the high 32 bits from TNUM info. */
+static u64 gen_hi_max(struct tnum var)
+{
+ return (var.value | var.mask) & ~0xffffffffULL;
+}
+
+/* Return true if VAL is compared with an s64 sign-extended from an s32, and
+ * both have the same signedness.
+ */
+static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
+{
+ return ((s32)sval >= 0 &&
+ reg->smin_value >= 0 && reg->smax_value <= S32_MAX) ||
+ ((s32)sval < 0 &&
+ reg->smax_value <= 0 && reg->smin_value >= S32_MIN);
+}
+
/* Adjusts the register min/max values in the case that the dst_reg is the
* variable register that we are working on, and src_reg is a constant or we're
* simply doing a BPF_K check.
@@ -4111,8 +4173,10 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
*/
static void reg_set_min_max(struct bpf_reg_state *true_reg,
struct bpf_reg_state *false_reg, u64 val,
- u8 opcode)
+ u8 opcode, bool is_jmp32)
{
+ s64 sval;
+
/* If the dst_reg is a pointer, we can't learn anything about its
* variable offset from the compare (unless src_reg were a pointer into
* the same object, but we don't bother with that.
@@ -4122,19 +4186,31 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
if (__is_pointer_value(false, false_reg))
return;
+ val = is_jmp32 ? (u32)val : val;
+ sval = is_jmp32 ? (s64)(s32)val : (s64)val;
+
switch (opcode) {
case BPF_JEQ:
- /* If this is false then we know nothing Jon Snow, but if it is
- * true then we know for sure.
- */
- __mark_reg_known(true_reg, val);
- break;
case BPF_JNE:
- /* If this is true we know nothing Jon Snow, but if it is false
- * we know the value for sure;
+ {
+ struct bpf_reg_state *reg =
+ opcode == BPF_JEQ ? true_reg : false_reg;
+
+ /* For BPF_JEQ, if this is false we know nothing Jon Snow, but
+ * if it is true we know the value for sure. Likewise for
+ * BPF_JNE.
*/
- __mark_reg_known(false_reg, val);
+ if (is_jmp32) {
+ u64 old_v = reg->var_off.value;
+ u64 hi_mask = ~0xffffffffULL;
+
+ reg->var_off.value = (old_v & hi_mask) | val;
+ reg->var_off.mask &= hi_mask;
+ } else {
+ __mark_reg_known(reg, val);
+ }
break;
+ }
case BPF_JSET:
false_reg->var_off = tnum_and(false_reg->var_off,
tnum_const(~val));
@@ -4142,38 +4218,61 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
true_reg->var_off = tnum_or(true_reg->var_off,
tnum_const(val));
break;
- case BPF_JGT:
- false_reg->umax_value = min(false_reg->umax_value, val);
- true_reg->umin_value = max(true_reg->umin_value, val + 1);
- break;
- case BPF_JSGT:
- false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
- true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
- break;
- case BPF_JLT:
- false_reg->umin_value = max(false_reg->umin_value, val);
- true_reg->umax_value = min(true_reg->umax_value, val - 1);
- break;
- case BPF_JSLT:
- false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
- true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
- break;
case BPF_JGE:
- false_reg->umax_value = min(false_reg->umax_value, val - 1);
- true_reg->umin_value = max(true_reg->umin_value, val);
+ case BPF_JGT:
+ {
+ u64 false_umax = opcode == BPF_JGT ? val : val - 1;
+ u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
+
+ if (is_jmp32) {
+ false_umax += gen_hi_max(false_reg->var_off);
+ true_umin += gen_hi_min(true_reg->var_off);
+ }
+ false_reg->umax_value = min(false_reg->umax_value, false_umax);
+ true_reg->umin_value = max(true_reg->umin_value, true_umin);
break;
+ }
case BPF_JSGE:
- false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
- true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
+ case BPF_JSGT:
+ {
+ s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1;
+ s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
+
+ /* If the full s64 was not sign-extended from s32 then don't
+ * deduce further info.
+ */
+ if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
+ break;
+ false_reg->smax_value = min(false_reg->smax_value, false_smax);
+ true_reg->smin_value = max(true_reg->smin_value, true_smin);
break;
+ }
case BPF_JLE:
- false_reg->umin_value = max(false_reg->umin_value, val + 1);
- true_reg->umax_value = min(true_reg->umax_value, val);
+ case BPF_JLT:
+ {
+ u64 false_umin = opcode == BPF_JLT ? val : val + 1;
+ u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
+
+ if (is_jmp32) {
+ false_umin += gen_hi_min(false_reg->var_off);
+ true_umax += gen_hi_max(true_reg->var_off);
+ }
+ false_reg->umin_value = max(false_reg->umin_value, false_umin);
+ true_reg->umax_value = min(true_reg->umax_value, true_umax);
break;
+ }
case BPF_JSLE:
- false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
- true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
+ case BPF_JSLT:
+ {
+ s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1;
+ s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
+
+ if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
+ break;
+ false_reg->smin_value = max(false_reg->smin_value, false_smin);
+ true_reg->smax_value = min(true_reg->smax_value, true_smax);
break;
+ }
default:
break;
}
@@ -4196,24 +4295,34 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
*/
static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
struct bpf_reg_state *false_reg, u64 val,
- u8 opcode)
+ u8 opcode, bool is_jmp32)
{
+ s64 sval;
+
if (__is_pointer_value(false, false_reg))
return;
+ val = is_jmp32 ? (u32)val : val;
+ sval = is_jmp32 ? (s64)(s32)val : (s64)val;
+
switch (opcode) {
case BPF_JEQ:
- /* If this is false then we know nothing Jon Snow, but if it is
- * true then we know for sure.
- */
- __mark_reg_known(true_reg, val);
- break;
case BPF_JNE:
- /* If this is true we know nothing Jon Snow, but if it is false
- * we know the value for sure;
- */
- __mark_reg_known(false_reg, val);
+ {
+ struct bpf_reg_state *reg =
+ opcode == BPF_JEQ ? true_reg : false_reg;
+
+ if (is_jmp32) {
+ u64 old_v = reg->var_off.value;
+ u64 hi_mask = ~0xffffffffULL;
+
+ reg->var_off.value = (old_v & hi_mask) | val;
+ reg->var_off.mask &= hi_mask;
+ } else {
+ __mark_reg_known(reg, val);
+ }
break;
+ }
case BPF_JSET:
false_reg->var_off = tnum_and(false_reg->var_off,
tnum_const(~val));
@@ -4221,38 +4330,58 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
true_reg->var_off = tnum_or(true_reg->var_off,
tnum_const(val));
break;
- case BPF_JGT:
- true_reg->umax_value = min(true_reg->umax_value, val - 1);
- false_reg->umin_value = max(false_reg->umin_value, val);
- break;
- case BPF_JSGT:
- true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
- false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
- break;
- case BPF_JLT:
- true_reg->umin_value = max(true_reg->umin_value, val + 1);
- false_reg->umax_value = min(false_reg->umax_value, val);
- break;
- case BPF_JSLT:
- true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
- false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
- break;
case BPF_JGE:
- true_reg->umax_value = min(true_reg->umax_value, val);
- false_reg->umin_value = max(false_reg->umin_value, val + 1);
+ case BPF_JGT:
+ {
+ u64 false_umin = opcode == BPF_JGT ? val : val + 1;
+ u64 true_umax = opcode == BPF_JGT ? val - 1 : val;
+
+ if (is_jmp32) {
+ false_umin += gen_hi_min(false_reg->var_off);
+ true_umax += gen_hi_max(true_reg->var_off);
+ }
+ false_reg->umin_value = max(false_reg->umin_value, false_umin);
+ true_reg->umax_value = min(true_reg->umax_value, true_umax);
break;
+ }
case BPF_JSGE:
- true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
- false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
+ case BPF_JSGT:
+ {
+ s64 false_smin = opcode == BPF_JSGT ? sval : sval + 1;
+ s64 true_smax = opcode == BPF_JSGT ? sval - 1 : sval;
+
+ if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
+ break;
+ false_reg->smin_value = max(false_reg->smin_value, false_smin);
+ true_reg->smax_value = min(true_reg->smax_value, true_smax);
break;
+ }
case BPF_JLE:
- true_reg->umin_value = max(true_reg->umin_value, val);
- false_reg->umax_value = min(false_reg->umax_value, val - 1);
+ case BPF_JLT:
+ {
+ u64 false_umax = opcode == BPF_JLT ? val : val - 1;
+ u64 true_umin = opcode == BPF_JLT ? val + 1 : val;
+
+ if (is_jmp32) {
+ false_umax += gen_hi_max(false_reg->var_off);
+ true_umin += gen_hi_min(true_reg->var_off);
+ }
+ false_reg->umax_value = min(false_reg->umax_value, false_umax);
+ true_reg->umin_value = max(true_reg->umin_value, true_umin);
break;
+ }
case BPF_JSLE:
- true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
- false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
+ case BPF_JSLT:
+ {
+ s64 false_smax = opcode == BPF_JSLT ? sval : sval - 1;
+ s64 true_smin = opcode == BPF_JSLT ? sval + 1 : sval;
+
+ if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
+ break;
+ false_reg->smax_value = min(false_reg->smax_value, false_smax);
+ true_reg->smin_value = max(true_reg->smin_value, true_smin);
break;
+ }
default:
break;
}
@@ -4390,6 +4519,10 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
if (BPF_SRC(insn->code) != BPF_X)
return false;
+ /* Pointers are always 64-bit. */
+ if (BPF_CLASS(insn->code) == BPF_JMP32)
+ return false;
+
switch (BPF_OP(insn->code)) {
case BPF_JGT:
if ((dst_reg->type == PTR_TO_PACKET &&
@@ -4482,16 +4615,18 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
struct bpf_reg_state *dst_reg, *other_branch_regs;
u8 opcode = BPF_OP(insn->code);
+ bool is_jmp32;
int err;
- if (opcode > BPF_JSLE) {
- verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
+ /* Only conditional jumps are expected to reach here. */
+ if (opcode == BPF_JA || opcode > BPF_JSLE) {
+ verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
return -EINVAL;
}
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0) {
- verbose(env, "BPF_JMP uses reserved fields\n");
+ verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
return -EINVAL;
}
@@ -4507,7 +4642,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
}
} else {
if (insn->src_reg != BPF_REG_0) {
- verbose(env, "BPF_JMP uses reserved fields\n");
+ verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
return -EINVAL;
}
}
@@ -4518,9 +4653,11 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
return err;
dst_reg = &regs[insn->dst_reg];
+ is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
if (BPF_SRC(insn->code) == BPF_K) {
- int pred = is_branch_taken(dst_reg, insn->imm, opcode);
+ int pred = is_branch_taken(dst_reg, insn->imm, opcode,
+ is_jmp32);
if (pred == 1) {
/* only follow the goto, ignore fall-through */
@@ -4548,30 +4685,51 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
* comparable.
*/
if (BPF_SRC(insn->code) == BPF_X) {
+ struct bpf_reg_state *src_reg = &regs[insn->src_reg];
+ struct bpf_reg_state lo_reg0 = *dst_reg;
+ struct bpf_reg_state lo_reg1 = *src_reg;
+ struct bpf_reg_state *src_lo, *dst_lo;
+
+ dst_lo = &lo_reg0;
+ src_lo = &lo_reg1;
+ coerce_reg_to_size(dst_lo, 4);
+ coerce_reg_to_size(src_lo, 4);
+
if (dst_reg->type == SCALAR_VALUE &&
- regs[insn->src_reg].type == SCALAR_VALUE) {
- if (tnum_is_const(regs[insn->src_reg].var_off))
+ src_reg->type == SCALAR_VALUE) {
+ if (tnum_is_const(src_reg->var_off) ||
+ (is_jmp32 && tnum_is_const(src_lo->var_off)))
reg_set_min_max(&other_branch_regs[insn->dst_reg],
- dst_reg, regs[insn->src_reg].var_off.value,
- opcode);
- else if (tnum_is_const(dst_reg->var_off))
+ dst_reg,
+ is_jmp32
+ ? src_lo->var_off.value
+ : src_reg->var_off.value,
+ opcode, is_jmp32);
+ else if (tnum_is_const(dst_reg->var_off) ||
+ (is_jmp32 && tnum_is_const(dst_lo->var_off)))
reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
- &regs[insn->src_reg],
- dst_reg->var_off.value, opcode);
- else if (opcode == BPF_JEQ || opcode == BPF_JNE)
+ src_reg,
+ is_jmp32
+ ? dst_lo->var_off.value
+ : dst_reg->var_off.value,
+ opcode, is_jmp32);
+ else if (!is_jmp32 &&
+ (opcode == BPF_JEQ || opcode == BPF_JNE))
/* Comparing for equality, we can combine knowledge */
reg_combine_min_max(&other_branch_regs[insn->src_reg],
&other_branch_regs[insn->dst_reg],
- &regs[insn->src_reg],
- &regs[insn->dst_reg], opcode);
+ src_reg, dst_reg, opcode);
}
} else if (dst_reg->type == SCALAR_VALUE) {
reg_set_min_max(&other_branch_regs[insn->dst_reg],
- dst_reg, insn->imm, opcode);
+ dst_reg, insn->imm, opcode, is_jmp32);
}
- /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
- if (BPF_SRC(insn->code) == BPF_K &&
+ /* detect if R == 0 where R is returned from bpf_map_lookup_elem().
+ * NOTE: the optimizations below relate to pointer comparisons,
+ * which are never JMP32.
+ */
+ if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
reg_type_may_be_null(dst_reg->type)) {
/* Mark all identical registers in each branch as either
@@ -4900,7 +5058,8 @@ peek_stack:
goto check_state;
t = insn_stack[cur_stack - 1];
- if (BPF_CLASS(insns[t].code) == BPF_JMP) {
+ if (BPF_CLASS(insns[t].code) == BPF_JMP ||
+ BPF_CLASS(insns[t].code) == BPF_JMP32) {
u8 opcode = BPF_OP(insns[t].code);
if (opcode == BPF_EXIT) {
@@ -4997,13 +5156,14 @@ static int check_btf_func(struct bpf_verifier_env *env,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
- u32 i, nfuncs, urec_size, min_size, prev_offset;
+ u32 i, nfuncs, urec_size, min_size;
u32 krec_size = sizeof(struct bpf_func_info);
struct bpf_func_info *krecord;
const struct btf_type *type;
struct bpf_prog *prog;
const struct btf *btf;
void __user *urecord;
+ u32 prev_offset = 0;
int ret = 0;
nfuncs = attr->func_info_cnt;
@@ -6055,7 +6215,7 @@ static int do_check(struct bpf_verifier_env *env)
if (err)
return err;
- } else if (class == BPF_JMP) {
+ } else if (class == BPF_JMP || class == BPF_JMP32) {
u8 opcode = BPF_OP(insn->code);
if (opcode == BPF_CALL) {
@@ -6063,7 +6223,8 @@ static int do_check(struct bpf_verifier_env *env)
insn->off != 0 ||
(insn->src_reg != BPF_REG_0 &&
insn->src_reg != BPF_PSEUDO_CALL) ||
- insn->dst_reg != BPF_REG_0) {
+ insn->dst_reg != BPF_REG_0 ||
+ class == BPF_JMP32) {
verbose(env, "BPF_CALL uses reserved fields\n");
return -EINVAL;
}
@@ -6079,7 +6240,8 @@ static int do_check(struct bpf_verifier_env *env)
if (BPF_SRC(insn->code) != BPF_K ||
insn->imm != 0 ||
insn->src_reg != BPF_REG_0 ||
- insn->dst_reg != BPF_REG_0) {
+ insn->dst_reg != BPF_REG_0 ||
+ class == BPF_JMP32) {
verbose(env, "BPF_JA uses reserved fields\n");
return -EINVAL;
}
@@ -6091,7 +6253,8 @@ static int do_check(struct bpf_verifier_env *env)
if (BPF_SRC(insn->code) != BPF_K ||
insn->imm != 0 ||
insn->src_reg != BPF_REG_0 ||
- insn->dst_reg != BPF_REG_0) {
+ insn->dst_reg != BPF_REG_0 ||
+ class == BPF_JMP32) {
verbose(env, "BPF_EXIT uses reserved fields\n");
return -EINVAL;
}
@@ -6431,6 +6594,153 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
return new_prog;
}
+static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
+ u32 off, u32 cnt)
+{
+ int i, j;
+
+ /* find first prog starting at or after off (first to remove) */
+ for (i = 0; i < env->subprog_cnt; i++)
+ if (env->subprog_info[i].start >= off)
+ break;
+ /* find first prog starting at or after off + cnt (first to stay) */
+ for (j = i; j < env->subprog_cnt; j++)
+ if (env->subprog_info[j].start >= off + cnt)
+ break;
+ /* if j doesn't start exactly at off + cnt, we are just removing
+ * the front of the previous prog
+ */
+ if (env->subprog_info[j].start != off + cnt)
+ j--;
+
+ if (j > i) {
+ struct bpf_prog_aux *aux = env->prog->aux;
+ int move;
+
+ /* move fake 'exit' subprog as well */
+ move = env->subprog_cnt + 1 - j;
+
+ memmove(env->subprog_info + i,
+ env->subprog_info + j,
+ sizeof(*env->subprog_info) * move);
+ env->subprog_cnt -= j - i;
+
+ /* remove func_info */
+ if (aux->func_info) {
+ move = aux->func_info_cnt - j;
+
+ memmove(aux->func_info + i,
+ aux->func_info + j,
+ sizeof(*aux->func_info) * move);
+ aux->func_info_cnt -= j - i;
+ /* func_info->insn_off is set after all code rewrites,
+ * in adjust_btf_func() - no need to adjust
+ */
+ }
+ } else {
+ /* convert i from "first prog to remove" to "first to adjust" */
+ if (env->subprog_info[i].start == off)
+ i++;
+ }
+
+ /* update fake 'exit' subprog as well */
+ for (; i <= env->subprog_cnt; i++)
+ env->subprog_info[i].start -= cnt;
+
+ return 0;
+}
+
+static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
+ u32 cnt)
+{
+ struct bpf_prog *prog = env->prog;
+ u32 i, l_off, l_cnt, nr_linfo;
+ struct bpf_line_info *linfo;
+
+ nr_linfo = prog->aux->nr_linfo;
+ if (!nr_linfo)
+ return 0;
+
+ linfo = prog->aux->linfo;
+
+ /* find first line info to remove, count lines to be removed */
+ for (i = 0; i < nr_linfo; i++)
+ if (linfo[i].insn_off >= off)
+ break;
+
+ l_off = i;
+ l_cnt = 0;
+ for (; i < nr_linfo; i++)
+ if (linfo[i].insn_off < off + cnt)
+ l_cnt++;
+ else
+ break;
+
+ /* If the first live insn doesn't match the first live linfo, it needs
+ * to "inherit" the last removed linfo. prog is already modified, so
+ * prog->len == off means there are no live instructions after it (the
+ * tail of the program was removed).
+ */
+ if (prog->len != off && l_cnt &&
+ (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
+ l_cnt--;
+ linfo[--i].insn_off = off + cnt;
+ }
+
+ /* remove the line info entries that refer to the removed instructions */
+ if (l_cnt) {
+ memmove(linfo + l_off, linfo + i,
+ sizeof(*linfo) * (nr_linfo - i));
+
+ prog->aux->nr_linfo -= l_cnt;
+ nr_linfo = prog->aux->nr_linfo;
+ }
+
+ /* pull all linfo[i].insn_off >= off + cnt in by cnt */
+ for (i = l_off; i < nr_linfo; i++)
+ linfo[i].insn_off -= cnt;
+
+ /* fix up all subprogs (incl. 'exit') which start >= off */
+ for (i = 0; i <= env->subprog_cnt; i++)
+ if (env->subprog_info[i].linfo_idx > l_off) {
+ /* program may have started in the removed region but
+ * may not be fully removed
+ */
+ if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
+ env->subprog_info[i].linfo_idx -= l_cnt;
+ else
+ env->subprog_info[i].linfo_idx = l_off;
+ }
+
+ return 0;
+}
+
+static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
+{
+ struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+ unsigned int orig_prog_len = env->prog->len;
+ int err;
+
+ if (bpf_prog_is_dev_bound(env->prog->aux))
+ bpf_prog_offload_remove_insns(env, off, cnt);
+
+ err = bpf_remove_insns(env->prog, off, cnt);
+ if (err)
+ return err;
+
+ err = adjust_subprog_starts_after_remove(env, off, cnt);
+ if (err)
+ return err;
+
+ err = bpf_adj_linfo_after_remove(env, off, cnt);
+ if (err)
+ return err;
+
+ memmove(aux_data + off, aux_data + off + cnt,
+ sizeof(*aux_data) * (orig_prog_len - off - cnt));
+
+ return 0;
+}
+
/* The verifier does more data flow analysis than llvm and will not
* explore branches that are dead at run time. Malicious programs can
* have dead code too. Therefore replace all dead at-run-time code
@@ -6457,6 +6767,91 @@ static void sanitize_dead_code(struct bpf_verifier_env *env)
}
}
+static bool insn_is_cond_jump(u8 code)
+{
+ u8 op;
+
+ if (BPF_CLASS(code) == BPF_JMP32)
+ return true;
+
+ if (BPF_CLASS(code) != BPF_JMP)
+ return false;
+
+ op = BPF_OP(code);
+ return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
+}
+
+static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
+{
+ struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+ struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
+ struct bpf_insn *insn = env->prog->insnsi;
+ const int insn_cnt = env->prog->len;
+ int i;
+
+ for (i = 0; i < insn_cnt; i++, insn++) {
+ if (!insn_is_cond_jump(insn->code))
+ continue;
+
+ if (!aux_data[i + 1].seen)
+ ja.off = insn->off;
+ else if (!aux_data[i + 1 + insn->off].seen)
+ ja.off = 0;
+ else
+ continue;
+
+ if (bpf_prog_is_dev_bound(env->prog->aux))
+ bpf_prog_offload_replace_insn(env, i, &ja);
+
+ memcpy(insn, &ja, sizeof(ja));
+ }
+}
+
+static int opt_remove_dead_code(struct bpf_verifier_env *env)
+{
+ struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+ int insn_cnt = env->prog->len;
+ int i, err;
+
+ for (i = 0; i < insn_cnt; i++) {
+ int j;
+
+ j = 0;
+ while (i + j < insn_cnt && !aux_data[i + j].seen)
+ j++;
+ if (!j)
+ continue;
+
+ err = verifier_remove_insns(env, i, j);
+ if (err)
+ return err;
+ insn_cnt = env->prog->len;
+ }
+
+ return 0;
+}
+
+static int opt_remove_nops(struct bpf_verifier_env *env)
+{
+ const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
+ struct bpf_insn *insn = env->prog->insnsi;
+ int insn_cnt = env->prog->len;
+ int i, err;
+
+ for (i = 0; i < insn_cnt; i++) {
+ if (memcmp(&insn[i], &ja, sizeof(ja)))
+ continue;
+
+ err = verifier_remove_insns(env, i, 1);
+ if (err)
+ return err;
+ insn_cnt--;
+ i--;
+ }
+
+ return 0;
+}
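
Editor's note: both optimizer passes reduce to the same pattern — find a span, call the remover, keep scanning the shrunk array. A toy model of the two loops (hypothetical userspace sketch; op == 0 stands in for the BPF_JA+0 nop):

#include <stdio.h>
#include <string.h>

struct ins { int op; int seen; };

static void remove_range(struct ins *p, int *cnt, int off, int n)
{
	memmove(p + off, p + off + n, sizeof(*p) * (*cnt - off - n));
	*cnt -= n;
}

int main(void)
{
	struct ins prog[] = {
		{ 1, 1 }, { 0, 1 }, { 2, 0 }, { 3, 0 }, { 4, 1 }, { 0, 1 }, { 5, 1 },
	};
	int cnt = 7, i, j;

	for (i = 0; i < cnt; i++) {		/* like opt_remove_dead_code */
		for (j = 0; i + j < cnt && !prog[i + j].seen; j++)
			;
		if (j)
			remove_range(prog, &cnt, i, j);
	}
	for (i = 0; i < cnt; i++) {		/* like opt_remove_nops */
		if (prog[i].op != 0)
			continue;
		remove_range(prog, &cnt, i, 1);
		i--;				/* re-check the same slot */
	}
	for (i = 0; i < cnt; i++)
		printf("%d ", prog[i].op);	/* prints: 1 4 5 */
	printf("\n");
	return 0;
}
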
+
/* convert load instructions that access fields of a context type into a
* sequence of instructions that access fields of the underlying structure:
* struct __sk_buff -> struct sk_buff
@@ -7147,7 +7542,8 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
{
struct bpf_verifier_env *env;
struct bpf_verifier_log *log;
- int ret = -EINVAL;
+ int i, len, ret = -EINVAL;
+ bool is_priv;
/* no program is valid */
if (ARRAY_SIZE(bpf_verifier_ops) == 0)
@@ -7161,12 +7557,14 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
return -ENOMEM;
log = &env->log;
+ len = (*prog)->len;
env->insn_aux_data =
- vzalloc(array_size(sizeof(struct bpf_insn_aux_data),
- (*prog)->len));
+ vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
ret = -ENOMEM;
if (!env->insn_aux_data)
goto err_free_env;
+ for (i = 0; i < len; i++)
+ env->insn_aux_data[i].orig_idx = i;
env->prog = *prog;
env->ops = bpf_verifier_ops[env->prog->type];
@@ -7194,6 +7592,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
env->strict_alignment = false;
+ is_priv = capable(CAP_SYS_ADMIN);
+ env->allow_ptr_leaks = is_priv;
+
ret = replace_map_fd_with_map_ptr(env);
if (ret < 0)
goto skip_full_check;
@@ -7211,8 +7612,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
if (!env->explored_states)
goto skip_full_check;
- env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
-
ret = check_subprogs(env);
if (ret < 0)
goto skip_full_check;
@@ -7242,8 +7641,17 @@ skip_full_check:
ret = check_max_stack_depth(env);
/* instruction rewrites happen after this point */
- if (ret == 0)
- sanitize_dead_code(env);
+ if (is_priv) {
+ if (ret == 0)
+ opt_hard_wire_dead_code_branches(env);
+ if (ret == 0)
+ ret = opt_remove_dead_code(env);
+ if (ret == 0)
+ ret = opt_remove_nops(env);
+ } else {
+ if (ret == 0)
+ sanitize_dead_code(env);
+ }
if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 69244e4598f5..1dd70f048e7b 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -674,7 +674,7 @@ static void batadv_mcast_mla_update(struct work_struct *work)
*/
static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
{
- if (ip_mc_check_igmp(skb, NULL) < 0)
+ if (ip_mc_check_igmp(skb) < 0)
return false;
switch (igmp_hdr(skb)->type) {
@@ -741,7 +741,7 @@ static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
*/
static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
{
- if (ipv6_mc_check_mld(skb, NULL) < 0)
+ if (ipv6_mc_check_mld(skb) < 0)
return false;
switch (icmp6_hdr(skb)->icmp6_type) {
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index fa2644d276ef..2c5172b33209 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -240,3 +240,85 @@ out:
kfree(data);
return ret;
}
+
+int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ u32 size = kattr->test.data_size_in;
+ u32 repeat = kattr->test.repeat;
+ struct bpf_flow_keys flow_keys;
+ u64 time_start, time_spent = 0;
+ struct bpf_skb_data_end *cb;
+ u32 retval, duration;
+ struct sk_buff *skb;
+ struct sock *sk;
+ void *data;
+ int ret;
+ u32 i;
+
+ if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
+ return -EINVAL;
+
+ data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ sk = kzalloc(sizeof(*sk), GFP_USER);
+ if (!sk) {
+ kfree(data);
+ return -ENOMEM;
+ }
+ sock_net_set(sk, current->nsproxy->net_ns);
+ sock_init_data(NULL, sk);
+
+ skb = build_skb(data, 0);
+ if (!skb) {
+ kfree(data);
+ kfree(sk);
+ return -ENOMEM;
+ }
+ skb->sk = sk;
+
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+ __skb_put(skb, size);
+ skb->protocol = eth_type_trans(skb,
+ current->nsproxy->net_ns->loopback_dev);
+ skb_reset_network_header(skb);
+
+ cb = (struct bpf_skb_data_end *)skb->cb;
+ cb->qdisc_cb.flow_keys = &flow_keys;
+
+ if (!repeat)
+ repeat = 1;
+
+ time_start = ktime_get_ns();
+ for (i = 0; i < repeat; i++) {
+ preempt_disable();
+ rcu_read_lock();
+ retval = __skb_flow_bpf_dissect(prog, skb,
+ &flow_keys_dissector,
+ &flow_keys);
+ rcu_read_unlock();
+ preempt_enable();
+
+ if (need_resched()) {
+ if (signal_pending(current))
+ break;
+ time_spent += ktime_get_ns() - time_start;
+ cond_resched();
+ time_start = ktime_get_ns();
+ }
+ }
+ time_spent += ktime_get_ns() - time_start;
+ do_div(time_spent, repeat);
+ duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
+
+ ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
+ retval, duration);
+
+ kfree_skb(skb);
+ kfree(sk);
+ return ret;
+}
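
Editor's note: from userspace this hook is reached through the BPF_PROG_TEST_RUN command. A hedged sketch, assuming a flow dissector program has already been loaded as prog_fd — per bpf_test_finish() above, the data_out buffer receives the struct bpf_flow_keys:

#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Run a loaded flow dissector program over a raw packet buffer and
 * read back the dissected bpf_flow_keys plus retval/duration.
 */
static int test_run_flow_dissector(int prog_fd, void *pkt, __u32 pkt_len,
				   struct bpf_flow_keys *keys)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = (__u64)(unsigned long)pkt;
	attr.test.data_size_in = pkt_len;
	attr.test.data_out = (__u64)(unsigned long)keys;  /* flow_keys land here */
	attr.test.data_size_out = sizeof(*keys);
	attr.test.repeat = 100;				  /* duration is averaged */

	if (syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)) < 0)
		return -1;
	printf("retval=%u duration=%uns\n", attr.test.retval, attr.test.duration);
	return 0;
}
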
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 9e14767500ea..00573cc46c98 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -915,7 +915,8 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
/* Add new permanent fdb entry with RTM_NEWNEIGH */
int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr, u16 vid, u16 nlh_flags)
+ const unsigned char *addr, u16 vid, u16 nlh_flags,
+ struct netlink_ext_ack *extack)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_port *p = NULL;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 3aeff0895669..780757b7a82f 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -14,6 +14,7 @@
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
+#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
@@ -29,6 +30,7 @@
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
+#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
@@ -938,7 +940,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
for (i = 0; i < num; i++) {
len += sizeof(*grec);
- if (!pskb_may_pull(skb, len))
+ if (!ip_mc_may_pull(skb, len))
return -EINVAL;
grec = (void *)(skb->data + len - sizeof(*grec));
@@ -946,7 +948,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
type = grec->grec_type;
len += ntohs(grec->grec_nsrcs) * 4;
- if (!pskb_may_pull(skb, len))
+ if (!ip_mc_may_pull(skb, len))
return -EINVAL;
/* We treat this as an IGMPv2 report for now. */
@@ -985,15 +987,17 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
struct sk_buff *skb,
u16 vid)
{
+ unsigned int nsrcs_offset;
const unsigned char *src;
struct icmp6hdr *icmp6h;
struct mld2_grec *grec;
+ unsigned int grec_len;
int i;
int len;
int num;
int err = 0;
- if (!pskb_may_pull(skb, sizeof(*icmp6h)))
+ if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
return -EINVAL;
icmp6h = icmp6_hdr(skb);
@@ -1003,21 +1007,25 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
for (i = 0; i < num; i++) {
__be16 *nsrcs, _nsrcs;
- nsrcs = skb_header_pointer(skb,
- len + offsetof(struct mld2_grec,
- grec_nsrcs),
+ nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
+
+ if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
+ nsrcs_offset + sizeof(_nsrcs))
+ return -EINVAL;
+
+ nsrcs = skb_header_pointer(skb, nsrcs_offset,
sizeof(_nsrcs), &_nsrcs);
if (!nsrcs)
return -EINVAL;
- if (!pskb_may_pull(skb,
- len + sizeof(*grec) +
- sizeof(struct in6_addr) * ntohs(*nsrcs)))
+ grec_len = sizeof(*grec) +
+ sizeof(struct in6_addr) * ntohs(*nsrcs);
+
+ if (!ipv6_mc_may_pull(skb, len + grec_len))
return -EINVAL;
grec = (struct mld2_grec *)(skb->data + len);
- len += sizeof(*grec) +
- sizeof(struct in6_addr) * ntohs(*nsrcs);
+ len += grec_len;
/* We treat these as MLDv1 reports for now. */
switch (grec->grec_type) {
@@ -1219,6 +1227,7 @@ static void br_ip4_multicast_query(struct net_bridge *br,
struct sk_buff *skb,
u16 vid)
{
+ unsigned int transport_len = ip_transport_len(skb);
const struct iphdr *iph = ip_hdr(skb);
struct igmphdr *ih = igmp_hdr(skb);
struct net_bridge_mdb_entry *mp;
@@ -1228,7 +1237,6 @@ static void br_ip4_multicast_query(struct net_bridge *br,
struct br_ip saddr;
unsigned long max_delay;
unsigned long now = jiffies;
- unsigned int offset = skb_transport_offset(skb);
__be32 group;
spin_lock(&br->multicast_lock);
@@ -1238,14 +1246,14 @@ static void br_ip4_multicast_query(struct net_bridge *br,
group = ih->group;
- if (skb->len == offset + sizeof(*ih)) {
+ if (transport_len == sizeof(*ih)) {
max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
if (!max_delay) {
max_delay = 10 * HZ;
group = 0;
}
- } else if (skb->len >= offset + sizeof(*ih3)) {
+ } else if (transport_len >= sizeof(*ih3)) {
ih3 = igmpv3_query_hdr(skb);
if (ih3->nsrcs)
goto out;
@@ -1296,6 +1304,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
struct sk_buff *skb,
u16 vid)
{
+ unsigned int transport_len = ipv6_transport_len(skb);
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct mld_msg *mld;
struct net_bridge_mdb_entry *mp;
@@ -1315,7 +1324,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
(port && port->state == BR_STATE_DISABLED))
goto out;
- if (skb->len == offset + sizeof(*mld)) {
+ if (transport_len == sizeof(*mld)) {
if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
err = -EINVAL;
goto out;
@@ -1576,17 +1585,29 @@ static void br_multicast_pim(struct net_bridge *br,
br_multicast_mark_router(br, port);
}
+static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb)
+{
+ if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
+ igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
+ return -ENOMSG;
+
+ br_multicast_mark_router(br, port);
+
+ return 0;
+}
+
static int br_multicast_ipv4_rcv(struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb,
u16 vid)
{
- struct sk_buff *skb_trimmed = NULL;
const unsigned char *src;
struct igmphdr *ih;
int err;
- err = ip_mc_check_igmp(skb, &skb_trimmed);
+ err = ip_mc_check_igmp(skb);
if (err == -ENOMSG) {
if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
@@ -1594,7 +1615,15 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
if (ip_hdr(skb)->protocol == IPPROTO_PIM)
br_multicast_pim(br, port, skb);
+ } else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
+ err = br_ip4_multicast_mrd_rcv(br, port, skb);
+
+ if (err < 0 && err != -ENOMSG) {
+ br_multicast_err_count(br, port, skb->protocol);
+ return err;
+ }
}
+
return 0;
} else if (err < 0) {
br_multicast_err_count(br, port, skb->protocol);
@@ -1612,19 +1641,16 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
err = br_ip4_multicast_add_group(br, port, ih->group, vid, src);
break;
case IGMPV3_HOST_MEMBERSHIP_REPORT:
- err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
+ err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
break;
case IGMP_HOST_MEMBERSHIP_QUERY:
- br_ip4_multicast_query(br, port, skb_trimmed, vid);
+ br_ip4_multicast_query(br, port, skb, vid);
break;
case IGMP_HOST_LEAVE_MESSAGE:
br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
break;
}
- if (skb_trimmed && skb_trimmed != skb)
- kfree_skb(skb_trimmed);
-
br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
BR_MCAST_DIR_RX);
@@ -1632,21 +1658,51 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
}
#if IS_ENABLED(CONFIG_IPV6)
+static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb)
+{
+ int ret;
+
+ if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
+ return -ENOMSG;
+
+ ret = ipv6_mc_check_icmpv6(skb);
+ if (ret < 0)
+ return ret;
+
+ if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
+ return -ENOMSG;
+
+ br_multicast_mark_router(br, port);
+
+ return 0;
+}
+
static int br_multicast_ipv6_rcv(struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb,
u16 vid)
{
- struct sk_buff *skb_trimmed = NULL;
const unsigned char *src;
struct mld_msg *mld;
int err;
- err = ipv6_mc_check_mld(skb, &skb_trimmed);
+ err = ipv6_mc_check_mld(skb);
if (err == -ENOMSG) {
if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
+
+ if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
+ err = br_ip6_multicast_mrd_rcv(br, port, skb);
+
+ if (err < 0 && err != -ENOMSG) {
+ br_multicast_err_count(br, port, skb->protocol);
+ return err;
+ }
+ }
+
return 0;
} else if (err < 0) {
br_multicast_err_count(br, port, skb->protocol);
@@ -1664,10 +1720,10 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
src);
break;
case ICMPV6_MLD2_REPORT:
- err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
+ err = br_ip6_multicast_mld2_report(br, port, skb, vid);
break;
case ICMPV6_MGM_QUERY:
- err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
+ err = br_ip6_multicast_query(br, port, skb, vid);
break;
case ICMPV6_MGM_REDUCTION:
src = eth_hdr(skb)->h_source;
@@ -1675,9 +1731,6 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
break;
}
- if (skb_trimmed && skb_trimmed != skb)
- kfree_skb(skb_trimmed);
-
br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
BR_MCAST_DIR_RX);
@@ -1781,6 +1834,68 @@ void br_multicast_init(struct net_bridge *br)
INIT_HLIST_HEAD(&br->mdb_list);
}
+static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
+{
+ struct in_device *in_dev = in_dev_get(br->dev);
+
+ if (!in_dev)
+ return;
+
+ ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP));
+ in_dev_put(in_dev);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
+{
+ struct in6_addr addr;
+
+ ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
+ ipv6_dev_mc_inc(br->dev, &addr);
+}
+#else
+static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
+{
+}
+#endif
+
+static void br_multicast_join_snoopers(struct net_bridge *br)
+{
+ br_ip4_multicast_join_snoopers(br);
+ br_ip6_multicast_join_snoopers(br);
+}
+
+static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
+{
+ struct in_device *in_dev = in_dev_get(br->dev);
+
+ if (WARN_ON(!in_dev))
+ return;
+
+ ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP));
+ in_dev_put(in_dev);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
+{
+ struct in6_addr addr;
+
+ ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
+ ipv6_dev_mc_dec(br->dev, &addr);
+}
+#else
+static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
+{
+}
+#endif
+
+static void br_multicast_leave_snoopers(struct net_bridge *br)
+{
+ br_ip4_multicast_leave_snoopers(br);
+ br_ip6_multicast_leave_snoopers(br);
+}
+
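Editor's note: the magic constants above encode the RFC 4286 group addresses. A small sketch confirming the IPv6 one is ff02::6a (the IPv4 counterpart, INADDR_ALLSNOOPERS_GROUP, is 224.0.0.106):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	struct in6_addr addr;
	char buf[INET6_ADDRSTRLEN];

	/* mirrors ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a)) */
	memset(&addr, 0, sizeof(addr));
	addr.s6_addr[0] = 0xff;
	addr.s6_addr[1] = 0x02;
	addr.s6_addr[15] = 0x6a;

	printf("%s\n", inet_ntop(AF_INET6, &addr, buf, sizeof(buf)));
	/* prints: ff02::6a */
	return 0;
}
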
static void __br_multicast_open(struct net_bridge *br,
struct bridge_mcast_own_query *query)
{
@@ -1794,6 +1909,9 @@ static void __br_multicast_open(struct net_bridge *br,
void br_multicast_open(struct net_bridge *br)
{
+ if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
+ br_multicast_join_snoopers(br);
+
__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
__br_multicast_open(br, &br->ip6_own_query);
@@ -1809,6 +1927,9 @@ void br_multicast_stop(struct net_bridge *br)
del_timer_sync(&br->ip6_other_query.timer);
del_timer_sync(&br->ip6_own_query.timer);
#endif
+
+ if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
+ br_multicast_leave_snoopers(br);
}
void br_multicast_dev_del(struct net_bridge *br)
@@ -1944,8 +2065,10 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
br_mc_disabled_update(br->dev, val);
br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
- if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
+ if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
+ br_multicast_leave_snoopers(br);
goto unlock;
+ }
if (!netif_running(br->dev))
goto unlock;
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index c93c35bb73dd..40d058378b52 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -881,11 +881,6 @@ static const struct nf_br_ops br_ops = {
.br_dev_xmit_hook = br_nf_dev_xmit,
};
-void br_netfilter_enable(void)
-{
-}
-EXPORT_SYMBOL_GPL(br_netfilter_enable);
-
/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
* br_dev_queue_push_xmit is called afterwards */
static const struct nf_hook_ops br_nf_ops[] = {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index eabf8bf28a3f..00deef7fc1f3 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -573,7 +573,8 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr, u16 vid);
int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
- const unsigned char *addr, u16 vid, u16 nlh_flags);
+ const unsigned char *addr, u16 vid, u16 nlh_flags,
+ struct netlink_ext_ack *extack);
int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev, struct net_device *fdev, int *idx);
int br_fdb_get(struct sk_buff *skb, struct nlattr *tb[], struct net_device *dev,
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index 035ff59d9cbd..4d2b9eb7604a 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -113,7 +113,7 @@ br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac,
info.added_by_user = added_by_user;
info.offloaded = offloaded;
notifier_type = adding ? SWITCHDEV_FDB_ADD_TO_DEVICE : SWITCHDEV_FDB_DEL_TO_DEVICE;
- call_switchdev_notifiers(notifier_type, dev, &info.info);
+ call_switchdev_notifiers(notifier_type, dev, &info.info, NULL);
}
void
diff --git a/net/core/dst.c b/net/core/dst.c
index 81ccf20e2826..a263309df115 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -98,8 +98,12 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
struct dst_entry *dst;
if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
- if (ops->gc(ops))
+ if (ops->gc(ops)) {
+ printk_ratelimited(KERN_NOTICE "Route cache is full: "
+ "consider increasing sysctl "
+ "net.ipv[4|6].route.max_size.\n");
return NULL;
+ }
}
dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
diff --git a/net/core/filter.c b/net/core/filter.c
index 7559d6835ecb..41984ad4b9b4 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -6708,6 +6708,27 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
target_size));
break;
+ case offsetof(struct __sk_buff, gso_segs):
+ /* si->dst_reg = skb_shinfo(SKB); */
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
+ si->dst_reg, si->src_reg,
+ offsetof(struct sk_buff, head));
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
+ BPF_REG_AX, si->src_reg,
+ offsetof(struct sk_buff, end));
+ *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
+#else
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
+ si->dst_reg, si->src_reg,
+ offsetof(struct sk_buff, end));
+#endif
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs),
+ si->dst_reg, si->dst_reg,
+ bpf_target_off(struct skb_shared_info,
+ gso_segs, 2,
+ target_size));
+ break;
case offsetof(struct __sk_buff, wire_len):
BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4);
@@ -7698,6 +7719,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = {
};
const struct bpf_prog_ops flow_dissector_prog_ops = {
+ .test_run = bpf_prog_test_run_flow_dissector,
};
int sk_detach_filter(struct sock *sk)
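
Editor's note: the gso_segs context rewrite above lets program C code read the field directly. A minimal sketch, assuming headers that carry this patch's uapi change (section name and surrounding loader are illustrative):

#include <linux/bpf.h>

#ifndef __section
#define __section(NAME) __attribute__((section(NAME), used))
#endif

/* Flag packets whose skb carries more than one GSO segment; the
 * verifier rewrites the gso_segs load into the two-step skb_shinfo()
 * access shown in the hunk above.
 */
__section("classifier") int count_gso(struct __sk_buff *skb)
{
	if (skb->gso_segs > 1)
		return 1;
	return 0;
}

char _license[] __section("license") = "GPL";
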
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 9f2840510e63..bb1a54747d64 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -683,6 +683,46 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
}
}
+bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
+ const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ struct bpf_flow_keys *flow_keys)
+{
+ struct bpf_skb_data_end cb_saved;
+ struct bpf_skb_data_end *cb;
+ u32 result;
+
+ /* Note that even though the const qualifier is discarded
+ * throughout the execution of the BPF program, all changes (to the
+ * control block) are reverted after the BPF program returns.
+ * Therefore, __skb_flow_dissect does not alter the skb.
+ */
+
+ cb = (struct bpf_skb_data_end *)skb->cb;
+
+ /* Save Control Block */
+ memcpy(&cb_saved, cb, sizeof(cb_saved));
+ memset(cb, 0, sizeof(*cb));
+
+ /* Pass parameters to the BPF program */
+ memset(flow_keys, 0, sizeof(*flow_keys));
+ cb->qdisc_cb.flow_keys = flow_keys;
+ flow_keys->nhoff = skb_network_offset(skb);
+ flow_keys->thoff = flow_keys->nhoff;
+
+ bpf_compute_data_pointers((struct sk_buff *)skb);
+ result = BPF_PROG_RUN(prog, skb);
+
+ /* Restore state */
+ memcpy(cb, &cb_saved, sizeof(cb_saved));
+
+ flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, 0, skb->len);
+ flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
+ flow_keys->nhoff, skb->len);
+
+ return result == BPF_OK;
+}
+
/**
* __skb_flow_dissect - extract the flow_keys struct and return it
* @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
@@ -714,7 +754,6 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector_key_vlan *key_vlan;
enum flow_dissect_ret fdret;
enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
- struct bpf_prog *attached = NULL;
int num_hdrs = 0;
u8 ip_proto = 0;
bool ret;
@@ -754,53 +793,30 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
FLOW_DISSECTOR_KEY_BASIC,
target_container);
- rcu_read_lock();
if (skb) {
+ struct bpf_flow_keys flow_keys;
+ struct bpf_prog *attached = NULL;
+
+ rcu_read_lock();
+
if (skb->dev)
attached = rcu_dereference(dev_net(skb->dev)->flow_dissector_prog);
else if (skb->sk)
attached = rcu_dereference(sock_net(skb->sk)->flow_dissector_prog);
else
WARN_ON_ONCE(1);
- }
- if (attached) {
- /* Note that even though the const qualifier is discarded
- * throughout the execution of the BPF program, all changes(the
- * control block) are reverted after the BPF program returns.
- * Therefore, __skb_flow_dissect does not alter the skb.
- */
- struct bpf_flow_keys flow_keys = {};
- struct bpf_skb_data_end cb_saved;
- struct bpf_skb_data_end *cb;
- u32 result;
-
- cb = (struct bpf_skb_data_end *)skb->cb;
-
- /* Save Control Block */
- memcpy(&cb_saved, cb, sizeof(cb_saved));
- memset(cb, 0, sizeof(cb_saved));
- /* Pass parameters to the BPF program */
- cb->qdisc_cb.flow_keys = &flow_keys;
- flow_keys.nhoff = nhoff;
- flow_keys.thoff = nhoff;
-
- bpf_compute_data_pointers((struct sk_buff *)skb);
- result = BPF_PROG_RUN(attached, skb);
-
- /* Restore state */
- memcpy(cb, &cb_saved, sizeof(cb_saved));
-
- flow_keys.nhoff = clamp_t(u16, flow_keys.nhoff, 0, skb->len);
- flow_keys.thoff = clamp_t(u16, flow_keys.thoff,
- flow_keys.nhoff, skb->len);
-
- __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
- target_container);
+ if (attached) {
+ ret = __skb_flow_bpf_dissect(attached, skb,
+ flow_dissector,
+ &flow_keys);
+ __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
+ target_container);
+ rcu_read_unlock();
+ return ret;
+ }
rcu_read_unlock();
- return result == BPF_OK;
}
- rcu_read_unlock();
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index b02fb19df2cc..17f36317363d 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -778,6 +778,41 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int rtnl_net_valid_getid_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ int i, err;
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
+ rtnl_net_policy, extack);
+
+ err = nlmsg_parse_strict(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
+ rtnl_net_policy, extack);
+ if (err)
+ return err;
+
+ for (i = 0; i <= NETNSA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case NETNSA_PID:
+ case NETNSA_FD:
+ case NETNSA_NSID:
+ case NETNSA_TARGET_NSID:
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -793,8 +828,7 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
struct sk_buff *msg;
int err;
- err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
- rtnl_net_policy, extack);
+ err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
if (err < 0)
return err;
if (tb[NETNSA_PID]) {
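
Editor's note: netlink_strict_get_check() reflects a per-socket opt-in. A userspace sketch, assuming NETLINK_GET_STRICT_CHK is available in the installed headers:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	int one = 1;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return 1;
	/* Opt this socket in to strict header/attribute validation: the
	 * kernel then rejects unsupported attributes with extack errors
	 * instead of silently ignoring them.
	 */
	if (setsockopt(fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK,
		       &one, sizeof(one)) < 0)
		perror("NETLINK_GET_STRICT_CHK");
	return 0;
}
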
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 5ea1bed08ede..f5a98082ac7a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3242,6 +3242,53 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
return ret;
}
+static int rtnl_valid_getlink_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct ifinfomsg *ifm;
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
+ NL_SET_ERR_MSG(extack, "Invalid header for get link");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
+ extack);
+
+ ifm = nlmsg_data(nlh);
+ if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
+ ifm->ifi_change) {
+ NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
+ extack);
+ if (err)
+ return err;
+
+ for (i = 0; i <= IFLA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case IFLA_IFNAME:
+ case IFLA_EXT_MASK:
+ case IFLA_TARGET_NETNSID:
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -3256,7 +3303,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
int err;
u32 ext_filter_mask = 0;
- err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
+ err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
if (err < 0)
return err;
@@ -3639,7 +3686,7 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
const struct net_device_ops *ops = br_dev->netdev_ops;
err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
- nlh->nlmsg_flags);
+ nlh->nlmsg_flags, extack);
if (err)
goto out;
else
@@ -3651,7 +3698,8 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
if (dev->netdev_ops->ndo_fdb_add)
err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
vid,
- nlh->nlmsg_flags);
+ nlh->nlmsg_flags,
+ extack);
else
err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
nlh->nlmsg_flags);
@@ -4901,6 +4949,40 @@ static size_t if_nlmsg_stats_size(const struct net_device *dev,
return size;
}
+static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
+ bool is_dump, struct netlink_ext_ack *extack)
+{
+ struct if_stats_msg *ifsm;
+
+ if (nlh->nlmsg_len < sizeof(*ifsm)) {
+ NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
+ return -EINVAL;
+ }
+
+ if (!strict_check)
+ return 0;
+
+ ifsm = nlmsg_data(nlh);
+
+ /* only requests using strict checks can pass data to influence
+ * the dump. The legacy exception is filter_mask.
+ */
+ if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
+ NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
+ return -EINVAL;
+ }
+ if (nlmsg_attrlen(nlh, sizeof(*ifsm))) {
+ NL_SET_ERR_MSG(extack, "Invalid attributes after stats header");
+ return -EINVAL;
+ }
+ if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
+ NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -4912,8 +4994,10 @@ static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 filter_mask;
int err;
- if (nlmsg_len(nlh) < sizeof(*ifsm))
- return -EINVAL;
+ err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
+ false, extack);
+ if (err)
+ return err;
ifsm = nlmsg_data(nlh);
if (ifsm->ifindex > 0)
@@ -4965,27 +5049,11 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
cb->seq = net->dev_base_seq;
- if (nlmsg_len(cb->nlh) < sizeof(*ifsm)) {
- NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
- return -EINVAL;
- }
+ err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
+ if (err)
+ return err;
ifsm = nlmsg_data(cb->nlh);
-
- /* only requests using strict checks can pass data to influence
- * the dump. The legacy exception is filter_mask.
- */
- if (cb->strict_check) {
- if (ifsm->pad1 || ifsm->pad2 || ifsm->ifindex) {
- NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
- return -EINVAL;
- }
- if (nlmsg_attrlen(cb->nlh, sizeof(*ifsm))) {
- NL_SET_ERR_MSG(extack, "Invalid attributes after stats header");
- return -EINVAL;
- }
- }
-
filter_mask = ifsm->filter_mask;
if (!filter_mask) {
NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index d6d5c20d7044..e76ed8df9f13 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -78,11 +78,9 @@ int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
{
int i = src->sg.start;
struct scatterlist *sge = sk_msg_elem(src, i);
+ struct scatterlist *sgd = NULL;
u32 sge_len, sge_off;
- if (sk_msg_full(dst))
- return -ENOSPC;
-
while (off) {
if (sge->length > off)
break;
@@ -94,16 +92,27 @@ int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
}
while (len) {
- if (sk_msg_full(dst))
- return -ENOSPC;
-
sge_len = sge->length - off;
- sge_off = sge->offset + off;
if (sge_len > len)
sge_len = len;
+
+ if (dst->sg.end)
+ sgd = sk_msg_elem(dst, dst->sg.end - 1);
+
+ if (sgd &&
+ (sg_page(sge) == sg_page(sgd)) &&
+ (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
+ sgd->length += sge_len;
+ dst->sg.size += sge_len;
+ } else if (!sk_msg_full(dst)) {
+ sge_off = sge->offset + off;
+ sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
+ } else {
+ return -ENOSPC;
+ }
+
off = 0;
len -= sge_len;
- sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
sk_mem_charge(sk, sge_len);
sk_msg_iter_var_next(i);
if (i == src->sg.end && len)
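
Editor's note: the merge condition above — same page, virtually contiguous — is the classic segment-coalescing test. A toy model (hypothetical, with plain buffers instead of scatterlists):

#include <stdio.h>

struct seg { char *base; int len; };

/* Append [p, p+len) to dst, merging with the tail segment when the new
 * data starts exactly where the tail ends (the sg_page/sg_virt
 * adjacency test above, flattened to plain pointers).
 */
static int seg_add(struct seg *dst, int *n, int max, char *p, int len)
{
	if (*n && dst[*n - 1].base + dst[*n - 1].len == p) {
		dst[*n - 1].len += len;		/* coalesce, no new slot */
		return 0;
	}
	if (*n == max)
		return -1;			/* the -ENOSPC case */
	dst[*n].base = p;
	dst[*n].len = len;
	(*n)++;
	return 0;
}

int main(void)
{
	static char buf[64];
	struct seg dst[2];
	int n = 0;

	seg_add(dst, &n, 2, buf, 16);
	seg_add(dst, &n, 2, buf + 16, 16);	/* merges: still one segment */
	printf("segments=%d len0=%d\n", n, dst[0].len);	/* prints: 1 32 */
	return 0;
}
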
diff --git a/net/core/sock.c b/net/core/sock.c
index 6aa2e7e0b4fb..900e8a9435f5 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -520,14 +520,11 @@ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
}
EXPORT_SYMBOL(sk_dst_check);
-static int sock_setbindtodevice(struct sock *sk, char __user *optval,
- int optlen)
+static int sock_setbindtodevice_locked(struct sock *sk, int ifindex)
{
int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
struct net *net = sock_net(sk);
- char devname[IFNAMSIZ];
- int index;
/* Sorry... */
ret = -EPERM;
@@ -535,6 +532,32 @@ static int sock_setbindtodevice(struct sock *sk, char __user *optval,
goto out;
ret = -EINVAL;
+ if (ifindex < 0)
+ goto out;
+
+ sk->sk_bound_dev_if = ifindex;
+ if (sk->sk_prot->rehash)
+ sk->sk_prot->rehash(sk);
+ sk_dst_reset(sk);
+
+ ret = 0;
+
+out:
+#endif
+
+ return ret;
+}
+
+static int sock_setbindtodevice(struct sock *sk, char __user *optval,
+ int optlen)
+{
+ int ret = -ENOPROTOOPT;
+#ifdef CONFIG_NETDEVICES
+ struct net *net = sock_net(sk);
+ char devname[IFNAMSIZ];
+ int index;
+
+ ret = -EINVAL;
if (optlen < 0)
goto out;
@@ -566,14 +589,9 @@ static int sock_setbindtodevice(struct sock *sk, char __user *optval,
}
lock_sock(sk);
- sk->sk_bound_dev_if = index;
- if (sk->sk_prot->rehash)
- sk->sk_prot->rehash(sk);
- sk_dst_reset(sk);
+ ret = sock_setbindtodevice_locked(sk, index);
release_sock(sk);
- ret = 0;
-
out:
#endif
@@ -1055,6 +1073,10 @@ set_rcvbuf:
}
break;
+ case SO_BINDTOIFINDEX:
+ ret = sock_setbindtodevice_locked(sk, val);
+ break;
+
default:
ret = -ENOPROTOOPT;
break;
@@ -1399,6 +1421,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
SOF_TXTIME_REPORT_ERRORS : 0;
break;
+ case SO_BINDTOIFINDEX:
+ v.val = sk->sk_bound_dev_if;
+ break;
+
default:
/* We implement the SO_SNDLOWAT etc to not be settable
* (1003.1g 7).
@@ -1726,7 +1752,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
newsk->sk_err_soft = 0;
newsk->sk_priority = 0;
newsk->sk_incoming_cpu = raw_smp_processor_id();
- atomic64_set(&newsk->sk_cookie, 0);
if (likely(newsk->sk_net_refcnt))
sock_inuse_add(sock_net(newsk), 1);
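
Editor's note: the sock.c changes above add SO_BINDTOIFINDEX, which takes a plain ifindex instead of an IFNAMSIZ string. A hedged userspace sketch (the #define fallback is only for headers that predate the option):

#include <stdio.h>
#include <sys/socket.h>
#include <net/if.h>

#ifndef SO_BINDTOIFINDEX
#define SO_BINDTOIFINDEX 62	/* asm-generic value; fallback for old headers */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int ifindex = if_nametoindex("lo");

	if (fd < 0 || !ifindex)
		return 1;
	/* Bind by index: no string copy, and getsockopt() can read the
	 * binding back as a plain int.
	 */
	if (setsockopt(fd, SOL_SOCKET, SO_BINDTOIFINDEX,
		       &ifindex, sizeof(ifindex)) < 0)
		perror("SO_BINDTOIFINDEX");
	return 0;
}
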
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index d67ec17f2cc8..84bf2861f45f 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -36,6 +36,15 @@ static int net_msg_warn; /* Unused, but still a sysctl */
int sysctl_fb_tunnels_only_for_init_net __read_mostly = 0;
EXPORT_SYMBOL(sysctl_fb_tunnels_only_for_init_net);
+/* 0 - Keep current behavior:
+ * IPv4: inherit all current settings from init_net
+ * IPv6: reset all settings to default
+ * 1 - Both inherit all current settings from init_net
+ * 2 - Both reset all settings to default
+ */
+int sysctl_devconf_inherit_init_net __read_mostly;
+EXPORT_SYMBOL(sysctl_devconf_inherit_init_net);
+
#ifdef CONFIG_RPS
static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -544,6 +553,15 @@ static struct ctl_table net_core_table[] = {
.extra1 = &zero,
.extra2 = &one,
},
+ {
+ .procname = "devconf_inherit_init_net",
+ .data = &sysctl_devconf_inherit_init_net,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &two,
+ },
{ }
};
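
Editor's note: the new knob then appears under /proc/sys/net/core/. A minimal sketch selecting mode 2 (new namespaces reset both families to defaults):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/core/devconf_inherit_init_net", "w");

	if (!f)
		return 1;
	fputs("2", f);	/* 2: reset IPv4 and IPv6 devconf to defaults */
	fclose(f);
	return 0;
}
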
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 85d6c879383d..8d03707abdac 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -480,7 +480,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
}
- if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
+ if (sk->sk_write_pending || inet_csk_in_pingpong_mode(sk) ||
icsk->icsk_accept_queue.rskq_defer_accept) {
/* Save one ACK. Data will be ready after
* several ticks, if write_pending is set.
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 1501a20a94ca..74e138495d67 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -199,7 +199,7 @@ static void dccp_delack_timer(struct timer_list *t)
icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
if (inet_csk_ack_scheduled(sk)) {
- if (!icsk->icsk_ack.pingpong) {
+ if (!inet_csk_in_pingpong_mode(sk)) {
/* Delayed ACK missed: inflate ATO. */
icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
icsk->icsk_rto);
@@ -207,7 +207,7 @@ static void dccp_delack_timer(struct timer_list *t)
/* Delayed ACK missed: leave pingpong mode and
* deflate ATO.
*/
- icsk->icsk_ack.pingpong = 0;
+ inet_csk_exit_pingpong_mode(sk);
icsk->icsk_ack.ato = TCP_ATO_MIN;
}
dccp_send_ack(sk);
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index f78fe58eafc8..6cd3737593a6 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -282,7 +282,7 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct nlattr *att
(nhs = dn_fib_count_nhs(attrs[RTA_MULTIPATH])) == 0)
goto err_inval;
- fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL);
+ fi = kzalloc(struct_size(fi, fib_nh, nhs), GFP_KERNEL);
err = -ENOBUFS;
if (fi == NULL)
goto failure;
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 026a05774bf7..1f4972dab9f2 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -103,7 +103,8 @@ static inline void dsa_legacy_unregister(void) { }
int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags);
+ u16 flags,
+ struct netlink_ext_ack *extack);
int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid);
diff --git a/net/dsa/master.c b/net/dsa/master.c
index 71bb15f491c8..79e97d2f2d9b 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -126,6 +126,17 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
}
}
+static int dsa_master_get_phys_port_name(struct net_device *dev,
+ char *name, size_t len)
+{
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+
+ if (snprintf(name, len, "p%d", cpu_dp->index) >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
static int dsa_master_ethtool_setup(struct net_device *dev)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
@@ -158,6 +169,38 @@ static void dsa_master_ethtool_teardown(struct net_device *dev)
cpu_dp->orig_ethtool_ops = NULL;
}
+static int dsa_master_ndo_setup(struct net_device *dev)
+{
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+ struct dsa_switch *ds = cpu_dp->ds;
+ struct net_device_ops *ops;
+
+ if (dev->netdev_ops->ndo_get_phys_port_name)
+ return 0;
+
+ ops = devm_kzalloc(ds->dev, sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
+ cpu_dp->orig_ndo_ops = dev->netdev_ops;
+ if (cpu_dp->orig_ndo_ops)
+ memcpy(ops, cpu_dp->orig_ndo_ops, sizeof(*ops));
+
+ ops->ndo_get_phys_port_name = dsa_master_get_phys_port_name;
+
+ dev->netdev_ops = ops;
+
+ return 0;
+}
+
+static void dsa_master_ndo_teardown(struct net_device *dev)
+{
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+
+ dev->netdev_ops = cpu_dp->orig_ndo_ops;
+ cpu_dp->orig_ndo_ops = NULL;
+}
+
static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
char *buf)
{
@@ -223,16 +266,27 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
if (ret)
return ret;
+ ret = dsa_master_ndo_setup(dev);
+ if (ret)
+ goto out_err_ethtool_teardown;
+
ret = sysfs_create_group(&dev->dev.kobj, &dsa_group);
if (ret)
- dsa_master_ethtool_teardown(dev);
+ goto out_err_ndo_teardown;
+
+ return ret;
+out_err_ndo_teardown:
+ dsa_master_ndo_teardown(dev);
+out_err_ethtool_teardown:
+ dsa_master_ethtool_teardown(dev);
return ret;
}
void dsa_master_teardown(struct net_device *dev)
{
sysfs_remove_group(&dev->dev.kobj, &dsa_group);
+ dsa_master_ndo_teardown(dev);
dsa_master_ethtool_teardown(dev);
dsa_master_reset_mtu(dev);
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a3fcc1d01615..91de3a663226 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1009,7 +1009,8 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags)
+ u16 flags,
+ struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
@@ -1450,7 +1451,7 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work)
}
fdb_info->offloaded = true;
call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
- &fdb_info->info);
+ &fdb_info->info, NULL);
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index e258a00b4a3d..cd9033245b98 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -2063,13 +2063,49 @@ static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
};
+static int inet_netconf_valid_get_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
+ NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf get request");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(struct netconfmsg), tb,
+ NETCONFA_MAX, devconf_ipv4_policy, extack);
+
+ err = nlmsg_parse_strict(nlh, sizeof(struct netconfmsg), tb,
+ NETCONFA_MAX, devconf_ipv4_policy, extack);
+ if (err)
+ return err;
+
+ for (i = 0; i <= NETCONFA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case NETCONFA_IFINDEX:
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in netconf get request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int inet_netconf_get_devconf(struct sk_buff *in_skb,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(in_skb->sk);
struct nlattr *tb[NETCONFA_MAX+1];
- struct netconfmsg *ncm;
struct sk_buff *skb;
struct ipv4_devconf *devconf;
struct in_device *in_dev;
@@ -2077,9 +2113,8 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
int ifindex;
int err;
- err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
- devconf_ipv4_policy, extack);
- if (err < 0)
+ err = inet_netconf_valid_get_req(in_skb, nlh, tb, extack);
+ if (err)
goto errout;
err = -EINVAL;
@@ -2556,32 +2591,32 @@ static __net_init int devinet_init_net(struct net *net)
int err;
struct ipv4_devconf *all, *dflt;
#ifdef CONFIG_SYSCTL
- struct ctl_table *tbl = ctl_forward_entry;
+ struct ctl_table *tbl;
struct ctl_table_header *forw_hdr;
#endif
err = -ENOMEM;
- all = &ipv4_devconf;
- dflt = &ipv4_devconf_dflt;
+ all = kmemdup(&ipv4_devconf, sizeof(ipv4_devconf), GFP_KERNEL);
+ if (!all)
+ goto err_alloc_all;
- if (!net_eq(net, &init_net)) {
- all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
- if (!all)
- goto err_alloc_all;
-
- dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
- if (!dflt)
- goto err_alloc_dflt;
+ dflt = kmemdup(&ipv4_devconf_dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
+ if (!dflt)
+ goto err_alloc_dflt;
#ifdef CONFIG_SYSCTL
- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
- if (!tbl)
- goto err_alloc_ctl;
+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
+ if (!tbl)
+ goto err_alloc_ctl;
- tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
- tbl[0].extra1 = all;
- tbl[0].extra2 = net;
+ tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
+ tbl[0].extra1 = all;
+ tbl[0].extra2 = net;
#endif
+
+ if (sysctl_devconf_inherit_init_net != 2 && !net_eq(net, &init_net)) {
+ memcpy(all, init_net.ipv4.devconf_all, sizeof(ipv4_devconf));
+ memcpy(dflt, init_net.ipv4.devconf_dflt, sizeof(ipv4_devconf_dflt));
}
#ifdef CONFIG_SYSCTL
@@ -2611,15 +2646,12 @@ err_reg_ctl:
err_reg_dflt:
__devinet_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
err_reg_all:
- if (tbl != ctl_forward_entry)
- kfree(tbl);
+ kfree(tbl);
err_alloc_ctl:
#endif
- if (dflt != &ipv4_devconf_dflt)
- kfree(dflt);
+ kfree(dflt);
err_alloc_dflt:
- if (all != &ipv4_devconf)
- kfree(all);
+ kfree(all);
err_alloc_all:
return err;
}
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 765b2b32c4a4..a40e48ded10d 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1493,22 +1493,22 @@ static int ip_mc_check_igmp_reportv3(struct sk_buff *skb)
len += sizeof(struct igmpv3_report);
- return pskb_may_pull(skb, len) ? 0 : -EINVAL;
+ return ip_mc_may_pull(skb, len) ? 0 : -EINVAL;
}
static int ip_mc_check_igmp_query(struct sk_buff *skb)
{
- unsigned int len = skb_transport_offset(skb);
-
- len += sizeof(struct igmphdr);
- if (skb->len < len)
- return -EINVAL;
+ unsigned int transport_len = ip_transport_len(skb);
+ unsigned int len;
/* IGMPv{1,2}? */
- if (skb->len != len) {
+ if (transport_len != sizeof(struct igmphdr)) {
/* or IGMPv3? */
- len += sizeof(struct igmpv3_query) - sizeof(struct igmphdr);
- if (skb->len < len || !pskb_may_pull(skb, len))
+ if (transport_len < sizeof(struct igmpv3_query))
+ return -EINVAL;
+
+ len = skb_transport_offset(skb) + sizeof(struct igmpv3_query);
+ if (!ip_mc_may_pull(skb, len))
return -EINVAL;
}
@@ -1544,47 +1544,29 @@ static inline __sum16 ip_mc_validate_checksum(struct sk_buff *skb)
return skb_checksum_simple_validate(skb);
}
-static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
-
+static int ip_mc_check_igmp_csum(struct sk_buff *skb)
{
- struct sk_buff *skb_chk;
- unsigned int transport_len;
unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
- int ret = -EINVAL;
+ unsigned int transport_len = ip_transport_len(skb);
+ struct sk_buff *skb_chk;
- transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
+ if (!ip_mc_may_pull(skb, len))
+ return -EINVAL;
skb_chk = skb_checksum_trimmed(skb, transport_len,
ip_mc_validate_checksum);
if (!skb_chk)
- goto err;
-
- if (!pskb_may_pull(skb_chk, len))
- goto err;
-
- ret = ip_mc_check_igmp_msg(skb_chk);
- if (ret)
- goto err;
-
- if (skb_trimmed)
- *skb_trimmed = skb_chk;
- /* free now unneeded clone */
- else if (skb_chk != skb)
- kfree_skb(skb_chk);
-
- ret = 0;
+ return -EINVAL;
-err:
- if (ret && skb_chk && skb_chk != skb)
+ if (skb_chk != skb)
kfree_skb(skb_chk);
- return ret;
+ return 0;
}
/**
* ip_mc_check_igmp - checks whether this is a sane IGMP packet
* @skb: the skb to validate
- * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
*
* Checks whether an IPv4 packet is a valid IGMP packet. If so sets
* skb transport header accordingly and returns zero.
@@ -1594,18 +1576,10 @@ err:
* -ENOMSG: IP header validation succeeded but it is not an IGMP packet.
* -ENOMEM: A memory allocation failure happened.
*
- * Optionally, an skb pointer might be provided via skb_trimmed (or set it
- * to NULL): After parsing an IGMP packet successfully it will point to
- * an skb which has its tail aligned to the IP packet end. This might
- * either be the originally provided skb or a trimmed, cloned version if
- * the skb frame had data beyond the IP packet. A cloned skb allows us
- * to leave the original skb and its full frame unchanged (which might be
- * desirable for layer 2 frame jugglers).
- *
* Caller needs to set the skb network header and free any returned skb if it
* differs from the provided skb.
*/
-int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
+int ip_mc_check_igmp(struct sk_buff *skb)
{
int ret = ip_mc_check_iphdr(skb);
@@ -1615,7 +1589,11 @@ int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
if (ip_hdr(skb)->protocol != IPPROTO_IGMP)
return -ENOMSG;
- return __ip_mc_check_igmp(skb, skb_trimmed);
+ ret = ip_mc_check_igmp_csum(skb);
+ if (ret < 0)
+ return ret;
+
+ return ip_mc_check_igmp_msg(skb);
}
EXPORT_SYMBOL(ip_mc_check_igmp);
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 760a9e52e02b..9f69411251d0 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -25,6 +25,62 @@
#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+/* Use skb->cb to track consecutive/adjacent fragments coming at
+ * the end of the queue. Nodes in the rb-tree queue will
+ * contain "runs" of one or more adjacent fragments.
+ *
+ * Invariants:
+ * - next_frag is NULL at the tail of a "run";
+ * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
+ */
+struct ipfrag_skb_cb {
+ union {
+ struct inet_skb_parm h4;
+ struct inet6_skb_parm h6;
+ };
+ struct sk_buff *next_frag;
+ int frag_run_len;
+};
+
+#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
+
+static void fragcb_clear(struct sk_buff *skb)
+{
+ RB_CLEAR_NODE(&skb->rbnode);
+ FRAG_CB(skb)->next_frag = NULL;
+ FRAG_CB(skb)->frag_run_len = skb->len;
+}
+
+/* Append skb to the last "run". */
+static void fragrun_append_to_last(struct inet_frag_queue *q,
+ struct sk_buff *skb)
+{
+ fragcb_clear(skb);
+
+ FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
+ FRAG_CB(q->fragments_tail)->next_frag = skb;
+ q->fragments_tail = skb;
+}
+
+/* Create a new "run" with the skb. */
+static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
+ fragcb_clear(skb);
+
+ if (q->last_run_head)
+ rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
+ &q->last_run_head->rbnode.rb_right);
+ else
+ rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
+ rb_insert_color(&skb->rbnode, &q->rb_fragments);
+
+ q->fragments_tail = skb;
+ q->last_run_head = skb;
+}
/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
* Value : 0xff if frame should be dropped.
@@ -123,6 +179,28 @@ static void inet_frag_destroy_rcu(struct rcu_head *head)
kmem_cache_free(f->frags_cachep, q);
}
+unsigned int inet_frag_rbtree_purge(struct rb_root *root)
+{
+ struct rb_node *p = rb_first(root);
+ unsigned int sum = 0;
+
+ while (p) {
+ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
+
+ p = rb_next(p);
+ rb_erase(&skb->rbnode, root);
+ while (skb) {
+ struct sk_buff *next = FRAG_CB(skb)->next_frag;
+
+ sum += skb->truesize;
+ kfree_skb(skb);
+ skb = next;
+ }
+ }
+ return sum;
+}
+EXPORT_SYMBOL(inet_frag_rbtree_purge);
+
void inet_frag_destroy(struct inet_frag_queue *q)
{
struct sk_buff *fp;
@@ -224,3 +302,218 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
return fq;
}
EXPORT_SYMBOL(inet_frag_find);
+
+int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+ int offset, int end)
+{
+ struct sk_buff *last = q->fragments_tail;
+
+ /* RFC5722, Section 4, amended by Errata ID 3089
+ * When reassembling an IPv6 datagram, if
+ * one or more of its constituent fragments is determined to be an
+ * overlapping fragment, the entire datagram (and any constituent
+ * fragments) MUST be silently discarded.
+ *
+ * Duplicates, however, should be ignored (i.e. skb dropped, but the
+ * queue/fragments kept for later reassembly).
+ */
+ if (!last)
+ fragrun_create(q, skb); /* First fragment. */
+ else if (last->ip_defrag_offset + last->len < end) {
+ /* This is the common case: skb goes to the end. */
+ /* Detect and discard overlaps. */
+ if (offset < last->ip_defrag_offset + last->len)
+ return IPFRAG_OVERLAP;
+ if (offset == last->ip_defrag_offset + last->len)
+ fragrun_append_to_last(q, skb);
+ else
+ fragrun_create(q, skb);
+ } else {
+ /* Binary search. Note that skb can become the first fragment,
+ * but not the last (covered above).
+ */
+ struct rb_node **rbn, *parent;
+
+ rbn = &q->rb_fragments.rb_node;
+ do {
+ struct sk_buff *curr;
+ int curr_run_end;
+
+ parent = *rbn;
+ curr = rb_to_skb(parent);
+ curr_run_end = curr->ip_defrag_offset +
+ FRAG_CB(curr)->frag_run_len;
+ if (end <= curr->ip_defrag_offset)
+ rbn = &parent->rb_left;
+ else if (offset >= curr_run_end)
+ rbn = &parent->rb_right;
+ else if (offset >= curr->ip_defrag_offset &&
+ end <= curr_run_end)
+ return IPFRAG_DUP;
+ else
+ return IPFRAG_OVERLAP;
+ } while (*rbn);
+ /* Here we have parent properly set, and rbn pointing to
+ * one of its NULL left/right children. Insert skb.
+ */
+ fragcb_clear(skb);
+ rb_link_node(&skb->rbnode, parent, rbn);
+ rb_insert_color(&skb->rbnode, &q->rb_fragments);
+ }
+
+ skb->ip_defrag_offset = offset;
+
+ return IPFRAG_OK;
+}
+EXPORT_SYMBOL(inet_frag_queue_insert);
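
Editor's note: the rb-tree walk above is interval classification against disjoint runs. A simplified linear model (hypothetical — arrays instead of the rb-tree) of the same OK/DUP/OVERLAP decisions:

#include <stdio.h>

enum { FRAG_OK, FRAG_DUP, FRAG_OVERLAP };

struct run { int start, end; };	/* one rb-tree node: a run of fragments */

/* Classify [off, end) against sorted, disjoint runs: fully inside an
 * existing run is a duplicate, any partial intersection is an overlap,
 * otherwise the fragment can be inserted.
 */
static int classify(const struct run *r, int n, int off, int end)
{
	for (int i = 0; i < n; i++) {
		if (end <= r[i].start || off >= r[i].end)
			continue;		/* disjoint from this run */
		if (off >= r[i].start && end <= r[i].end)
			return FRAG_DUP;
		return FRAG_OVERLAP;
	}
	return FRAG_OK;
}

int main(void)
{
	struct run q[] = { { 0, 1000 }, { 1200, 1700 } };

	printf("%d\n", classify(q, 2, 1700, 2000));	/* FRAG_OK (0) */
	printf("%d\n", classify(q, 2, 1200, 1700));	/* FRAG_DUP (1) */
	printf("%d\n", classify(q, 2, 900, 1300));	/* FRAG_OVERLAP (2) */
	return 0;
}
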
+
+void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+ struct sk_buff *parent)
+{
+ struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
+ struct sk_buff **nextp;
+ int delta;
+
+ if (head != skb) {
+ fp = skb_clone(skb, GFP_ATOMIC);
+ if (!fp)
+ return NULL;
+ FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
+ if (RB_EMPTY_NODE(&skb->rbnode))
+ FRAG_CB(parent)->next_frag = fp;
+ else
+ rb_replace_node(&skb->rbnode, &fp->rbnode,
+ &q->rb_fragments);
+ if (q->fragments_tail == skb)
+ q->fragments_tail = fp;
+ skb_morph(skb, head);
+ FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
+ rb_replace_node(&head->rbnode, &skb->rbnode,
+ &q->rb_fragments);
+ consume_skb(head);
+ head = skb;
+ }
+ WARN_ON(head->ip_defrag_offset != 0);
+
+ delta = -head->truesize;
+
+ /* Head of list must not be cloned. */
+ if (skb_unclone(head, GFP_ATOMIC))
+ return NULL;
+
+ delta += head->truesize;
+ if (delta)
+ add_frag_mem_limit(q->net, delta);
+
+ /* If the first fragment is fragmented itself, we split
+ * it to two chunks: the first with data and paged part
+ * and the second, holding only fragments.
+ */
+ if (skb_has_frag_list(head)) {
+ struct sk_buff *clone;
+ int i, plen = 0;
+
+ clone = alloc_skb(0, GFP_ATOMIC);
+ if (!clone)
+ return NULL;
+ skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
+ skb_frag_list_init(head);
+ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+ plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
+ clone->data_len = head->data_len - plen;
+ clone->len = clone->data_len;
+ head->truesize += clone->truesize;
+ clone->csum = 0;
+ clone->ip_summed = head->ip_summed;
+ add_frag_mem_limit(q->net, clone->truesize);
+ skb_shinfo(head)->frag_list = clone;
+ nextp = &clone->next;
+ } else {
+ nextp = &skb_shinfo(head)->frag_list;
+ }
+
+ return nextp;
+}
+EXPORT_SYMBOL(inet_frag_reasm_prepare);
+
+void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+ void *reasm_data)
+{
+ struct sk_buff **nextp = (struct sk_buff **)reasm_data;
+ struct rb_node *rbn;
+ struct sk_buff *fp;
+
+ skb_push(head, head->data - skb_network_header(head));
+
+ /* Traverse the tree in order, to build frag_list. */
+ fp = FRAG_CB(head)->next_frag;
+ rbn = rb_next(&head->rbnode);
+ rb_erase(&head->rbnode, &q->rb_fragments);
+ while (rbn || fp) {
+ /* fp points to the next sk_buff in the current run;
+ * rbn points to the next run.
+ */
+ /* Go through the current run. */
+ while (fp) {
+ *nextp = fp;
+ nextp = &fp->next;
+ fp->prev = NULL;
+ memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+ fp->sk = NULL;
+ head->data_len += fp->len;
+ head->len += fp->len;
+ if (head->ip_summed != fp->ip_summed)
+ head->ip_summed = CHECKSUM_NONE;
+ else if (head->ip_summed == CHECKSUM_COMPLETE)
+ head->csum = csum_add(head->csum, fp->csum);
+ head->truesize += fp->truesize;
+ fp = FRAG_CB(fp)->next_frag;
+ }
+ /* Move to the next run. */
+ if (rbn) {
+ struct rb_node *rbnext = rb_next(rbn);
+
+ fp = rb_to_skb(rbn);
+ rb_erase(rbn, &q->rb_fragments);
+ rbn = rbnext;
+ }
+ }
+ sub_frag_mem_limit(q->net, head->truesize);
+
+ *nextp = NULL;
+ skb_mark_not_on_list(head);
+ head->prev = NULL;
+ head->tstamp = q->stamp;
+}
+EXPORT_SYMBOL(inet_frag_reasm_finish);
+
+struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
+{
+ struct sk_buff *head;
+
+ if (q->fragments) {
+ head = q->fragments;
+ q->fragments = head->next;
+ } else {
+ struct sk_buff *skb;
+
+ head = skb_rb_first(&q->rb_fragments);
+ if (!head)
+ return NULL;
+ skb = FRAG_CB(head)->next_frag;
+ if (skb)
+ rb_replace_node(&head->rbnode, &skb->rbnode,
+ &q->rb_fragments);
+ else
+ rb_erase(&head->rbnode, &q->rb_fragments);
+ memset(&head->rbnode, 0, sizeof(head->rbnode));
+ barrier();
+ }
+ if (head == q->fragments_tail)
+ q->fragments_tail = NULL;
+
+ sub_frag_mem_limit(q->net, head->truesize);
+
+ return head;
+}
+EXPORT_SYMBOL(inet_frag_pull_head);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 867be8f7f1fa..486ecb0aeb87 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -57,57 +57,6 @@
*/
static const char ip_frag_cache_name[] = "ip4-frags";
-/* Use skb->cb to track consecutive/adjacent fragments coming at
- * the end of the queue. Nodes in the rb-tree queue will
- * contain "runs" of one or more adjacent fragments.
- *
- * Invariants:
- * - next_frag is NULL at the tail of a "run";
- * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
- */
-struct ipfrag_skb_cb {
- struct inet_skb_parm h;
- struct sk_buff *next_frag;
- int frag_run_len;
-};
-
-#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
-
-static void ip4_frag_init_run(struct sk_buff *skb)
-{
- BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
-
- FRAG_CB(skb)->next_frag = NULL;
- FRAG_CB(skb)->frag_run_len = skb->len;
-}
-
-/* Append skb to the last "run". */
-static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
- struct sk_buff *skb)
-{
- RB_CLEAR_NODE(&skb->rbnode);
- FRAG_CB(skb)->next_frag = NULL;
-
- FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
- FRAG_CB(q->fragments_tail)->next_frag = skb;
- q->fragments_tail = skb;
-}
-
-/* Create a new "run" with the skb. */
-static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
-{
- if (q->last_run_head)
- rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
- &q->last_run_head->rbnode.rb_right);
- else
- rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
- rb_insert_color(&skb->rbnode, &q->rb_fragments);
-
- ip4_frag_init_run(skb);
- q->fragments_tail = skb;
- q->last_run_head = skb;
-}
-
/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
struct inet_frag_queue q;
@@ -212,27 +161,9 @@ static void ip_expire(struct timer_list *t)
* pull the head out of the tree in order to be able to
* deal with head->dev.
*/
- if (qp->q.fragments) {
- head = qp->q.fragments;
- qp->q.fragments = head->next;
- } else {
- head = skb_rb_first(&qp->q.rb_fragments);
- if (!head)
- goto out;
- if (FRAG_CB(head)->next_frag)
- rb_replace_node(&head->rbnode,
- &FRAG_CB(head)->next_frag->rbnode,
- &qp->q.rb_fragments);
- else
- rb_erase(&head->rbnode, &qp->q.rb_fragments);
- memset(&head->rbnode, 0, sizeof(head->rbnode));
- barrier();
- }
- if (head == qp->q.fragments_tail)
- qp->q.fragments_tail = NULL;
-
- sub_frag_mem_limit(qp->q.net, head->truesize);
-
+ head = inet_frag_pull_head(&qp->q);
+ if (!head)
+ goto out;
head->dev = dev_get_by_index_rcu(net, qp->iif);
if (!head->dev)
goto out;
@@ -344,12 +275,10 @@ static int ip_frag_reinit(struct ipq *qp)
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
- struct rb_node **rbn, *parent;
- struct sk_buff *skb1, *prev_tail;
- int ihl, end, skb1_run_end;
+ int ihl, end, flags, offset;
+ struct sk_buff *prev_tail;
struct net_device *dev;
unsigned int fragsize;
- int flags, offset;
int err = -ENOENT;
u8 ecn;
@@ -413,62 +342,13 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
	/* Makes sure compiler won't do silly aliasing games */
barrier();
- /* RFC5722, Section 4, amended by Errata ID : 3089
- * When reassembling an IPv6 datagram, if
- * one or more of its constituent fragments is determined to be an
- * overlapping fragment, the entire datagram (and any constituent
- * fragments) MUST be silently discarded.
- *
- * We do the same here for IPv4 (and increment an snmp counter) but
- * we do not want to drop the whole queue in response to a duplicate
- * fragment.
- */
-
- err = -EINVAL;
- /* Find out where to put this fragment. */
prev_tail = qp->q.fragments_tail;
- if (!prev_tail)
- ip4_frag_create_run(&qp->q, skb); /* First fragment. */
- else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
- /* This is the common case: skb goes to the end. */
- /* Detect and discard overlaps. */
- if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
- goto overlap;
- if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
- ip4_frag_append_to_last_run(&qp->q, skb);
- else
- ip4_frag_create_run(&qp->q, skb);
- } else {
- /* Binary search. Note that skb can become the first fragment,
- * but not the last (covered above).
- */
- rbn = &qp->q.rb_fragments.rb_node;
- do {
- parent = *rbn;
- skb1 = rb_to_skb(parent);
- skb1_run_end = skb1->ip_defrag_offset +
- FRAG_CB(skb1)->frag_run_len;
- if (end <= skb1->ip_defrag_offset)
- rbn = &parent->rb_left;
- else if (offset >= skb1_run_end)
- rbn = &parent->rb_right;
- else if (offset >= skb1->ip_defrag_offset &&
- end <= skb1_run_end)
- goto err; /* No new data, potential duplicate */
- else
- goto overlap; /* Found an overlap */
- } while (*rbn);
- /* Here we have parent properly set, and rbn pointing to
- * one of its NULL left/right children. Insert skb.
- */
- ip4_frag_init_run(skb);
- rb_link_node(&skb->rbnode, parent, rbn);
- rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
- }
+ err = inet_frag_queue_insert(&qp->q, skb, offset, end);
+ if (err)
+ goto insert_error;
if (dev)
qp->iif = dev->ifindex;
- skb->ip_defrag_offset = offset;
qp->q.stamp = skb->tstamp;
qp->q.meat += skb->len;
@@ -501,10 +381,16 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
skb_dst_drop(skb);
return -EINPROGRESS;
-overlap:
+insert_error:
+ if (err == IPFRAG_DUP) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+ err = -EINVAL;
__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
discard_qp:
inet_frag_kill(&qp->q);
+ __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
err:
kfree_skb(skb);
return err;
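
The insert_error path above relies on the return-code contract of the new inet_frag_queue_insert() helper. A sketch of the assumed values (cf. include/net/inet_frag.h in this series):

	#define IPFRAG_OK	0	/* fragment linked into the queue */
	#define IPFRAG_DUP	1	/* no new data: drop skb, keep queue */
	#define IPFRAG_OVERLAP	2	/* overlap found: kill the queue */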
@@ -516,13 +402,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
{
struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
struct iphdr *iph;
- struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
- struct sk_buff **nextp; /* To build frag_list. */
- struct rb_node *rbn;
- int len;
- int ihlen;
- int delta;
- int err;
+ void *reasm_data;
+ int len, err;
u8 ecn;
ipq_kill(qp);
@@ -532,117 +413,23 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
err = -EINVAL;
goto out_fail;
}
- /* Make the one we just received the head. */
- if (head != skb) {
- fp = skb_clone(skb, GFP_ATOMIC);
- if (!fp)
- goto out_nomem;
- FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
- if (RB_EMPTY_NODE(&skb->rbnode))
- FRAG_CB(prev_tail)->next_frag = fp;
- else
- rb_replace_node(&skb->rbnode, &fp->rbnode,
- &qp->q.rb_fragments);
- if (qp->q.fragments_tail == skb)
- qp->q.fragments_tail = fp;
- skb_morph(skb, head);
- FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
- rb_replace_node(&head->rbnode, &skb->rbnode,
- &qp->q.rb_fragments);
- consume_skb(head);
- head = skb;
- }
- WARN_ON(head->ip_defrag_offset != 0);
-
- /* Allocate a new buffer for the datagram. */
- ihlen = ip_hdrlen(head);
- len = ihlen + qp->q.len;
+ /* Make the one we just received the head. */
+ reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
+ if (!reasm_data)
+ goto out_nomem;
+ len = ip_hdrlen(skb) + qp->q.len;
err = -E2BIG;
if (len > 65535)
goto out_oversize;
- delta = - head->truesize;
-
- /* Head of list must not be cloned. */
- if (skb_unclone(head, GFP_ATOMIC))
- goto out_nomem;
-
- delta += head->truesize;
- if (delta)
- add_frag_mem_limit(qp->q.net, delta);
-
- /* If the first fragment is fragmented itself, we split
- * it to two chunks: the first with data and paged part
- * and the second, holding only fragments. */
- if (skb_has_frag_list(head)) {
- struct sk_buff *clone;
- int i, plen = 0;
-
- clone = alloc_skb(0, GFP_ATOMIC);
- if (!clone)
- goto out_nomem;
- skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
- skb_frag_list_init(head);
- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
- plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
- clone->len = clone->data_len = head->data_len - plen;
- head->truesize += clone->truesize;
- clone->csum = 0;
- clone->ip_summed = head->ip_summed;
- add_frag_mem_limit(qp->q.net, clone->truesize);
- skb_shinfo(head)->frag_list = clone;
- nextp = &clone->next;
- } else {
- nextp = &skb_shinfo(head)->frag_list;
- }
-
- skb_push(head, head->data - skb_network_header(head));
+ inet_frag_reasm_finish(&qp->q, skb, reasm_data);
- /* Traverse the tree in order, to build frag_list. */
- fp = FRAG_CB(head)->next_frag;
- rbn = rb_next(&head->rbnode);
- rb_erase(&head->rbnode, &qp->q.rb_fragments);
- while (rbn || fp) {
- /* fp points to the next sk_buff in the current run;
- * rbn points to the next run.
- */
- /* Go through the current run. */
- while (fp) {
- *nextp = fp;
- nextp = &fp->next;
- fp->prev = NULL;
- memset(&fp->rbnode, 0, sizeof(fp->rbnode));
- fp->sk = NULL;
- head->data_len += fp->len;
- head->len += fp->len;
- if (head->ip_summed != fp->ip_summed)
- head->ip_summed = CHECKSUM_NONE;
- else if (head->ip_summed == CHECKSUM_COMPLETE)
- head->csum = csum_add(head->csum, fp->csum);
- head->truesize += fp->truesize;
- fp = FRAG_CB(fp)->next_frag;
- }
- /* Move to the next run. */
- if (rbn) {
- struct rb_node *rbnext = rb_next(rbn);
-
- fp = rb_to_skb(rbn);
- rb_erase(rbn, &qp->q.rb_fragments);
- rbn = rbnext;
- }
- }
- sub_frag_mem_limit(qp->q.net, head->truesize);
-
- *nextp = NULL;
- skb_mark_not_on_list(head);
- head->prev = NULL;
- head->dev = dev;
- head->tstamp = qp->q.stamp;
- IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
+ skb->dev = dev;
+ IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
- iph = ip_hdr(head);
+ iph = ip_hdr(skb);
iph->tot_len = htons(len);
iph->tos |= ecn;
@@ -655,7 +442,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
* from one very small df-fragment and one large non-df frag.
*/
if (qp->max_df_size == qp->q.max_size) {
- IPCB(head)->flags |= IPSKB_FRAG_PMTU;
+ IPCB(skb)->flags |= IPSKB_FRAG_PMTU;
iph->frag_off = htons(IP_DF);
} else {
iph->frag_off = 0;
@@ -753,28 +540,6 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
}
EXPORT_SYMBOL(ip_check_defrag);
-unsigned int inet_frag_rbtree_purge(struct rb_root *root)
-{
- struct rb_node *p = rb_first(root);
- unsigned int sum = 0;
-
- while (p) {
- struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
-
- p = rb_next(p);
- rb_erase(&skb->rbnode, root);
- while (skb) {
- struct sk_buff *next = FRAG_CB(skb)->next_frag;
-
- sum += skb->truesize;
- kfree_skb(skb);
- skb = next;
- }
- }
- return sum;
-}
-EXPORT_SYMBOL(inet_frag_rbtree_purge);
-
#ifdef CONFIG_SYSCTL
static int dist_min;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 20a64fe6254b..d1cef66820d3 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -449,81 +449,14 @@ static int gre_handle_offloads(struct sk_buff *skb, bool csum)
return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
-static struct rtable *gre_get_rt(struct sk_buff *skb,
- struct net_device *dev,
- struct flowi4 *fl,
- const struct ip_tunnel_key *key)
-{
- struct net *net = dev_net(dev);
-
- memset(fl, 0, sizeof(*fl));
- fl->daddr = key->u.ipv4.dst;
- fl->saddr = key->u.ipv4.src;
- fl->flowi4_tos = RT_TOS(key->tos);
- fl->flowi4_mark = skb->mark;
- fl->flowi4_proto = IPPROTO_GRE;
-
- return ip_route_output_key(net, fl);
-}
-
-static struct rtable *prepare_fb_xmit(struct sk_buff *skb,
- struct net_device *dev,
- struct flowi4 *fl,
- int tunnel_hlen)
-{
- struct ip_tunnel_info *tun_info;
- const struct ip_tunnel_key *key;
- struct rtable *rt = NULL;
- int min_headroom;
- bool use_cache;
- int err;
-
- tun_info = skb_tunnel_info(skb);
- key = &tun_info->key;
- use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
-
- if (use_cache)
- rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl->saddr);
- if (!rt) {
- rt = gre_get_rt(skb, dev, fl, key);
- if (IS_ERR(rt))
- goto err_free_skb;
- if (use_cache)
- dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
- fl->saddr);
- }
-
- min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
- + tunnel_hlen + sizeof(struct iphdr);
- if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
- int head_delta = SKB_DATA_ALIGN(min_headroom -
- skb_headroom(skb) +
- 16);
- err = pskb_expand_head(skb, max_t(int, head_delta, 0),
- 0, GFP_ATOMIC);
- if (unlikely(err))
- goto err_free_rt;
- }
- return rt;
-
-err_free_rt:
- ip_rt_put(rt);
-err_free_skb:
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return NULL;
-}
-
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
__be16 proto)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
- struct rtable *rt = NULL;
- struct flowi4 fl;
int tunnel_hlen;
- __be16 df, flags;
+ __be16 flags;
tun_info = skb_tunnel_info(skb);
if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
@@ -533,13 +466,12 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
key = &tun_info->key;
tunnel_hlen = gre_calc_hlen(key->tun_flags);
- rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
- if (!rt)
- return;
+ if (skb_cow_head(skb, dev->needed_headroom))
+ goto err_free_skb;
/* Push Tunnel header. */
if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
- goto err_free_rt;
+ goto err_free_skb;
flags = tun_info->key.tun_flags &
(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
@@ -547,14 +479,10 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
tunnel_id_to_key32(tun_info->key.tun_id),
(flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);
- df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+ ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
- iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
- key->tos, key->ttl, df, false);
return;
-err_free_rt:
- ip_rt_put(rt);
err_free_skb:
kfree_skb(skb);
dev->stats.tx_dropped++;
@@ -566,10 +494,8 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
struct erspan_metadata *md;
- struct rtable *rt = NULL;
bool truncate = false;
- __be16 df, proto;
- struct flowi4 fl;
+ __be16 proto;
int tunnel_hlen;
int version;
int nhoff;
@@ -582,21 +508,20 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
key = &tun_info->key;
if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
- goto err_free_rt;
+ goto err_free_skb;
md = ip_tunnel_info_opts(tun_info);
if (!md)
- goto err_free_rt;
+ goto err_free_skb;
/* ERSPAN has fixed 8 byte GRE header */
version = md->version;
tunnel_hlen = 8 + erspan_hdr_len(version);
- rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
- if (!rt)
- return;
+ if (skb_cow_head(skb, dev->needed_headroom))
+ goto err_free_skb;
if (gre_handle_offloads(skb, false))
- goto err_free_rt;
+ goto err_free_skb;
if (skb->len > dev->mtu + dev->hard_header_len) {
pskb_trim(skb, dev->mtu + dev->hard_header_len);
@@ -625,20 +550,16 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
truncate, true);
proto = htons(ETH_P_ERSPAN2);
} else {
- goto err_free_rt;
+ goto err_free_skb;
}
gre_build_header(skb, 8, TUNNEL_SEQ,
proto, 0, htonl(tunnel->o_seqno++));
- df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+ ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
- iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
- key->tos, key->ttl, df, false);
return;
-err_free_rt:
- ip_rt_put(rt);
err_free_skb:
kfree_skb(skb);
dev->stats.tx_dropped++;
@@ -647,13 +568,18 @@ err_free_skb:
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
struct ip_tunnel_info *info = skb_tunnel_info(skb);
+ const struct ip_tunnel_key *key;
struct rtable *rt;
struct flowi4 fl4;
if (ip_tunnel_info_af(info) != AF_INET)
return -EINVAL;
- rt = gre_get_rt(skb, dev, &fl4, &info->key);
+ key = &info->key;
+ ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
+ tunnel_id_to_key32(key->tun_id), key->tos, 0,
+ skb->mark);
+ rt = ip_route_output_key(dev_net(dev), &fl4);
if (IS_ERR(rt))
return PTR_ERR(rt);
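
gre_fill_metadata_dst() now builds the flow via ip_tunnel_init_flow() instead of the removed gre_get_rt(). The parameter order this call site assumes, sketched from the way both this hunk and the ip_md_tunnel_xmit() changes later in the patch use it:

	/* Assumed helper shape (include/net/ip_tunnels.h) */
	static inline void ip_tunnel_init_flow(struct flowi4 *fl4, int proto,
					       __be32 daddr, __be32 saddr,
					       __be32 key, __u8 tos, int oif,
					       __u32 mark)
	{
		memset(fl4, 0, sizeof(*fl4));
		fl4->daddr = daddr;
		fl4->saddr = saddr;
		fl4->flowi4_tos = tos;
		fl4->flowi4_proto = proto;
		fl4->fl4_gre_key = key;
		fl4->flowi4_oif = oif;
		fl4->flowi4_mark = mark;
	}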
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 51d8efba6de2..cd6b5694f99e 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -429,7 +429,6 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
if (skb->pkt_type == PACKET_OTHERHOST)
goto drop;
-
__IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);
skb = skb_share_check(skb, GFP_ATOMIC);
@@ -521,6 +520,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
skb = ip_rcv_core(skb, net);
if (skb == NULL)
return NET_RX_DROP;
+
return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
net, NULL, skb, dev, NULL,
ip_rcv_finish);
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 054d01c16dc6..893f013d5369 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -501,15 +501,19 @@ EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup);
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
struct rtable *rt, __be16 df,
- const struct iphdr *inner_iph)
+ const struct iphdr *inner_iph,
+ int tunnel_hlen, __be32 dst, bool md)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
+ int pkt_size;
int mtu;
+ tunnel_hlen = md ? tunnel_hlen : tunnel->hlen;
+ pkt_size = skb->len - tunnel_hlen - dev->hard_header_len;
+
if (df)
mtu = dst_mtu(&rt->dst) - dev->hard_header_len
- - sizeof(struct iphdr) - tunnel->hlen;
+ - sizeof(struct iphdr) - tunnel_hlen;
else
mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
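
For metadata-based tunnels the header budget now comes from the per-packet tunnel_hlen rather than the device's configured tunnel->hlen. With assumed numbers, the DF-case arithmetic looks like:

	/* Illustration only: route MTU 1500, 14-byte hard header,
	 * 20-byte outer iphdr, 8-byte GRE tunnel_hlen, DF set:
	 *
	 *   mtu = 1500 - 14 - 20 - 8 = 1458
	 */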
@@ -527,11 +531,13 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6)) {
struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
+ __be32 daddr;
+
+ daddr = md ? dst : tunnel->parms.iph.daddr;
if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
mtu >= IPV6_MIN_MTU) {
- if ((tunnel->parms.iph.daddr &&
- !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
+ if ((daddr && !ipv4_is_multicast(daddr)) ||
rt6->rt6i_dst.plen == 128) {
rt6->rt6i_flags |= RTF_MODIFIED;
dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
@@ -548,17 +554,19 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
return 0;
}
-void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto)
+void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ u8 proto, int tunnel_hlen)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
u32 headroom = sizeof(struct iphdr);
struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
const struct iphdr *inner_iph;
- struct rtable *rt;
+ struct rtable *rt = NULL;
struct flowi4 fl4;
__be16 df = 0;
u8 tos, ttl;
+ bool use_cache;
tun_info = skb_tunnel_info(skb);
if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
@@ -574,20 +582,39 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto)
else if (skb->protocol == htons(ETH_P_IPV6))
tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
}
- ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, 0,
- RT_TOS(tos), tunnel->parms.link, tunnel->fwmark);
+ ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
+ tunnel_id_to_key32(key->tun_id), RT_TOS(tos),
+ 0, skb->mark);
if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
goto tx_error;
- rt = ip_route_output_key(tunnel->net, &fl4);
- if (IS_ERR(rt)) {
- dev->stats.tx_carrier_errors++;
- goto tx_error;
+
+ use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
+ if (use_cache)
+ rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl4.saddr);
+ if (!rt) {
+ rt = ip_route_output_key(tunnel->net, &fl4);
+ if (IS_ERR(rt)) {
+ dev->stats.tx_carrier_errors++;
+ goto tx_error;
+ }
+ if (use_cache)
+ dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
+ fl4.saddr);
}
if (rt->dst.dev == dev) {
ip_rt_put(rt);
dev->stats.collisions++;
goto tx_error;
}
+
+ if (key->tun_flags & TUNNEL_DONT_FRAGMENT)
+ df = htons(IP_DF);
+ if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, tunnel_hlen,
+ key->u.ipv4.dst, true)) {
+ ip_rt_put(rt);
+ goto tx_error;
+ }
+
tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
ttl = key->ttl;
if (ttl == 0) {
@@ -598,10 +625,10 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto)
else
ttl = ip4_dst_hoplimit(&rt->dst);
}
- if (key->tun_flags & TUNNEL_DONT_FRAGMENT)
- df = htons(IP_DF);
- else if (skb->protocol == htons(ETH_P_IP))
+
+ if (!df && skb->protocol == htons(ETH_P_IP))
df = inner_iph->frag_off & htons(IP_DF);
+
headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
if (headroom > dev->needed_headroom)
dev->needed_headroom = headroom;
@@ -737,7 +764,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
- if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
+ if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph,
+ 0, 0, false)) {
ip_rt_put(rt);
goto tx_error;
}
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 57c5dd283a2c..fe10b9a2efc8 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -302,7 +302,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
skb_set_inner_ipproto(skb, ipproto);
if (tunnel->collect_md)
- ip_md_tunnel_xmit(skb, dev, ipproto);
+ ip_md_tunnel_xmit(skb, dev, ipproto, 0);
else
ip_tunnel_xmit(skb, dev, tiph, ipproto);
return NETDEV_TX_OK;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index ddbf8c9a1abb..fb99002c3d4e 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2467,6 +2467,61 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE_R, -ENOBUFS);
}
+static int ipmr_rtm_valid_getroute_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct rtmsg *rtm;
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
+ NL_SET_ERR_MSG(extack, "ipv4: Invalid header for multicast route get request");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_ipv4_policy, extack);
+
+ rtm = nlmsg_data(nlh);
+ if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
+ (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
+ rtm->rtm_tos || rtm->rtm_table || rtm->rtm_protocol ||
+ rtm->rtm_scope || rtm->rtm_type || rtm->rtm_flags) {
+ NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for multicast route get request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_ipv4_policy, extack);
+ if (err)
+ return err;
+
+ if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
+ (tb[RTA_DST] && !rtm->rtm_dst_len)) {
+ NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
+ return -EINVAL;
+ }
+
+ for (i = 0; i <= RTA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case RTA_SRC:
+ case RTA_DST:
+ case RTA_TABLE:
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in multicast route get request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -2475,18 +2530,14 @@ static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct sk_buff *skb = NULL;
struct mfc_cache *cache;
struct mr_table *mrt;
- struct rtmsg *rtm;
__be32 src, grp;
u32 tableid;
int err;
- err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
- rtm_ipv4_policy, extack);
+ err = ipmr_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
if (err < 0)
goto errout;
- rtm = nlmsg_data(nlh);
-
src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
grp = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0;
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 8d2e5dc9a827..a058213b77a7 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -80,24 +80,6 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
}
EXPORT_SYMBOL(ip_route_me_harder);
-int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
-{
- const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
-
- if (entry->state.hook == NF_INET_LOCAL_OUT) {
- const struct iphdr *iph = ip_hdr(skb);
-
- if (!(iph->tos == rt_info->tos &&
- skb->mark == rt_info->mark &&
- iph->daddr == rt_info->daddr &&
- iph->saddr == rt_info->saddr))
- return ip_route_me_harder(entry->state.net, skb,
- RTN_UNSPEC);
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(nf_ip_reroute);
-
int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
bool strict __always_unused)
{
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index 2687db015b6f..e26165af45cb 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -214,7 +214,7 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb,
}
/* Change outer to look like the reply to an incoming packet */
- nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
+ nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip))
return 0;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ce92f73cf104..99be68b15da0 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2763,6 +2763,75 @@ static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
return skb;
}
+static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct rtmsg *rtm;
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
+ NL_SET_ERR_MSG(extack,
+ "ipv4: Invalid header for route get request");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_ipv4_policy, extack);
+
+ rtm = nlmsg_data(nlh);
+ if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
+ (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
+ rtm->rtm_table || rtm->rtm_protocol ||
+ rtm->rtm_scope || rtm->rtm_type) {
+ NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
+ return -EINVAL;
+ }
+
+ if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
+ RTM_F_LOOKUP_TABLE |
+ RTM_F_FIB_MATCH)) {
+ NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_ipv4_policy, extack);
+ if (err)
+ return err;
+
+ if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
+ (tb[RTA_DST] && !rtm->rtm_dst_len)) {
+ NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
+ return -EINVAL;
+ }
+
+ for (i = 0; i <= RTA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case RTA_IIF:
+ case RTA_OIF:
+ case RTA_SRC:
+ case RTA_DST:
+ case RTA_IP_PROTO:
+ case RTA_SPORT:
+ case RTA_DPORT:
+ case RTA_MARK:
+ case RTA_UID:
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -2783,8 +2852,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
int err;
int mark;
- err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy,
- extack);
+ err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
if (err < 0)
return err;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2079145a3b7c..6f8d292ad501 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1127,7 +1127,8 @@ void tcp_free_fastopen_req(struct tcp_sock *tp)
}
static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
- int *copied, size_t size)
+ int *copied, size_t size,
+ struct ubuf_info *uarg)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_sock *inet = inet_sk(sk);
@@ -1147,6 +1148,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
return -ENOBUFS;
tp->fastopen_req->data = msg;
tp->fastopen_req->size = size;
+ tp->fastopen_req->uarg = uarg;
if (inet->defer_connect) {
err = tcp_connect(sk);
@@ -1186,11 +1188,6 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
flags = msg->msg_flags;
if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
- if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
- err = -EINVAL;
- goto out_err;
- }
-
skb = tcp_write_queue_tail(sk);
uarg = sock_zerocopy_realloc(sk, size, skb_zcopy(skb));
if (!uarg) {
@@ -1205,7 +1202,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
!tp->repair) {
- err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
+ err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg);
if (err == -EINPROGRESS && copied_syn > 0)
goto out;
else if (err)
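
Passing uarg down into tcp_sendmsg_fastopen() extends MSG_ZEROCOPY coverage to SYN data. From userspace the combination would look roughly like the following sketch (error handling elided; this only works on kernels carrying this change):

	int one = 1;

	setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
	sendto(fd, buf, len, MSG_FASTOPEN | MSG_ZEROCOPY,
	       (struct sockaddr *)&daddr, sizeof(daddr));
	/* completions arrive on the socket error queue as usual */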
@@ -1554,7 +1551,7 @@ static void tcp_cleanup_rbuf(struct sock *sk, int copied)
(copied > 0 &&
((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
- !icsk->icsk_ack.pingpong)) &&
+ !inet_csk_in_pingpong_mode(sk))) &&
!atomic_read(&sk->sk_rmem_alloc)))
time_to_ack = true;
}
@@ -2572,14 +2569,16 @@ int tcp_disconnect(struct sock *sk, int flags)
sk->sk_shutdown = 0;
sock_reset_flag(sk, SOCK_DONE);
tp->srtt_us = 0;
+ tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
tp->rcv_rtt_last_tsecr = 0;
tp->write_seq += tp->max_window + 2;
if (tp->write_seq == 0)
tp->write_seq = 1;
icsk->icsk_backoff = 0;
- tp->snd_cwnd = 2;
icsk->icsk_probes_out = 0;
+ icsk->icsk_rto = TCP_TIMEOUT_INIT;
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+ tp->snd_cwnd = TCP_INIT_CWND;
tp->snd_cwnd_cnt = 0;
tp->window_clamp = 0;
tp->delivered_ce = 0;
@@ -2603,6 +2602,23 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->duplicate_sack[0].end_seq = 0;
tp->dsack_dups = 0;
tp->reord_seen = 0;
+ tp->retrans_out = 0;
+ tp->sacked_out = 0;
+ tp->tlp_high_seq = 0;
+ tp->last_oow_ack_time = 0;
+ /* There's a bubble in the pipe until at least the first ACK. */
+ tp->app_limited = ~0U;
+ tp->rack.mstamp = 0;
+ tp->rack.advanced = 0;
+ tp->rack.reo_wnd_steps = 1;
+ tp->rack.last_delivered = 0;
+ tp->rack.reo_wnd_persist = 0;
+ tp->rack.dsack_seen = 0;
+ tp->syn_data_acked = 0;
+ tp->rx_opt.saw_tstamp = 0;
+ tp->rx_opt.dsack = 0;
+ tp->rx_opt.num_sacks = 0;
+
/* Clean up fastopen related fields */
tcp_free_fastopen_req(tp);
@@ -2968,16 +2984,16 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
case TCP_QUICKACK:
if (!val) {
- icsk->icsk_ack.pingpong = 1;
+ inet_csk_enter_pingpong_mode(sk);
} else {
- icsk->icsk_ack.pingpong = 0;
+ inet_csk_exit_pingpong_mode(sk);
if ((1 << sk->sk_state) &
(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
inet_csk_ack_scheduled(sk)) {
icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
tcp_cleanup_rbuf(sk, 1);
if (!(val & 1))
- icsk->icsk_ack.pingpong = 1;
+ inet_csk_enter_pingpong_mode(sk);
}
}
break;
@@ -3391,7 +3407,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
return 0;
}
case TCP_QUICKACK:
- val = !icsk->icsk_ack.pingpong;
+ val = !inet_csk_in_pingpong_mode(sk);
break;
case TCP_CONGESTION:
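
The pingpong accessors used throughout this file replace direct reads and writes of icsk->icsk_ack.pingpong, which this series widens from a flag into a small counter. A hedged sketch of the helpers the code above assumes (cf. include/net/inet_connection_sock.h in the same series):

	#define TCP_PINGPONG_THRESH	1	/* assumed threshold */

	static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
	{
		inet_csk(sk)->icsk_ack.pingpong = TCP_PINGPONG_THRESH;
	}

	static inline void inet_csk_exit_pingpong_mode(struct sock *sk)
	{
		inet_csk(sk)->icsk_ack.pingpong = 0;
	}

	static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
	{
		return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
	}

	static inline void inet_csk_inc_pingpong_cnt(struct sock *sk)
	{
		struct inet_connection_sock *icsk = inet_csk(sk);

		if (icsk->icsk_ack.pingpong < U8_MAX)
			icsk->icsk_ack.pingpong++;
	}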
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 0f497fc49c3f..56be7d27f208 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -115,6 +115,14 @@ struct bbr {
unused_b:5;
u32 prior_cwnd; /* prior cwnd upon entering loss recovery */
u32 full_bw; /* recent bw, to estimate if pipe is full */
+
+ /* For tracking ACK aggregation: */
+ u64 ack_epoch_mstamp; /* start of ACK sampling epoch */
+ u16 extra_acked[2]; /* max excess data ACKed in epoch */
+ u32 ack_epoch_acked:20, /* packets (S)ACKed in sampling epoch */
+ extra_acked_win_rtts:5, /* age of extra_acked, in round trips */
+ extra_acked_win_idx:1, /* current index in extra_acked array */
+ unused_c:6;
};
#define CYCLE_LEN 8 /* number of phases in a pacing gain cycle */
@@ -182,6 +190,15 @@ static const u32 bbr_lt_bw_diff = 4000 / 8;
/* If we estimate we're policed, use lt_bw for this many round trips: */
static const u32 bbr_lt_bw_max_rtts = 48;
+/* Gain factor for adding extra_acked to target cwnd: */
+static const int bbr_extra_acked_gain = BBR_UNIT;
+/* Window length of extra_acked window. */
+static const u32 bbr_extra_acked_win_rtts = 5;
+/* Max allowed value for ack_epoch_acked, after which the sampling epoch is reset */
+static const u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20;
+/* Time period for clamping cwnd increment due to ack aggregation */
+static const u32 bbr_extra_acked_max_us = 100 * 1000;
+
static void bbr_check_probe_rtt_done(struct sock *sk);
/* Do we estimate that STARTUP filled the pipe? */
@@ -208,6 +225,16 @@ static u32 bbr_bw(const struct sock *sk)
return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
}
+/* Return maximum extra acked in past k-2k round trips,
+ * where k = bbr_extra_acked_win_rtts.
+ */
+static u16 bbr_extra_acked(const struct sock *sk)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ return max(bbr->extra_acked[0], bbr->extra_acked[1]);
+}
+
/* Return rate in bytes per second, optionally with a gain.
* The order here is chosen carefully to avoid overflow of u64. This should
* work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
@@ -305,6 +332,8 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
if (event == CA_EVENT_TX_START && tp->app_limited) {
bbr->idle_restart = 1;
+ bbr->ack_epoch_mstamp = tp->tcp_mstamp;
+ bbr->ack_epoch_acked = 0;
/* Avoid pointless buffer overflows: pace at est. bw if we don't
* need more speed (we're restarting from idle and app-limited).
*/
@@ -315,30 +344,19 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
}
}
-/* Find target cwnd. Right-size the cwnd based on min RTT and the
- * estimated bottleneck bandwidth:
+/* Calculate bdp based on min RTT and the estimated bottleneck bandwidth:
*
- * cwnd = bw * min_rtt * gain = BDP * gain
+ * bdp = bw * min_rtt * gain
*
* The key factor, gain, controls the amount of queue. While a small gain
* builds a smaller queue, it becomes more vulnerable to noise in RTT
* measurements (e.g., delayed ACKs or other ACK compression effects). This
* noise may cause BBR to under-estimate the rate.
- *
- * To achieve full performance in high-speed paths, we budget enough cwnd to
- * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
- * - one skb in sending host Qdisc,
- * - one skb in sending host TSO/GSO engine
- * - one skb being received by receiver host LRO/GRO/delayed-ACK engine
- * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
- * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
- * which allows 2 outstanding 2-packet sequences, to try to keep pipe
- * full even with ACK-every-other-packet delayed ACKs.
*/
-static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
+static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
{
struct bbr *bbr = inet_csk_ca(sk);
- u32 cwnd;
+ u32 bdp;
u64 w;
/* If we've never had a valid RTT sample, cap cwnd at the initial
@@ -353,7 +371,24 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
w = (u64)bw * bbr->min_rtt_us;
/* Apply a gain to the given value, then remove the BW_SCALE shift. */
- cwnd = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
+ bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
+
+ return bdp;
+}
+
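
Numerically, with bw stored as packets per usec scaled by BW_UNIT (1 << 24) and gain scaled by BBR_UNIT (1 << 8), the computation above reduces to rate times RTT. A worked example under assumed inputs:

	/* bw = 0.01 pkt/us (10,000 pkt/s), min_rtt_us = 40000,
	 * gain = BBR_UNIT:
	 *
	 *   w   = (0.01 * BW_UNIT) * 40000 = 400 * BW_UNIT
	 *   bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT
	 *       = 400 packets
	 */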
+/* To achieve full performance in high-speed paths, we budget enough cwnd to
+ * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
+ * - one skb in sending host Qdisc,
+ * - one skb in sending host TSO/GSO engine
+ * - one skb being received by receiver host LRO/GRO/delayed-ACK engine
+ * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
+ * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
+ * which allows 2 outstanding 2-packet sequences, to try to keep pipe
+ * full even with ACK-every-other-packet delayed ACKs.
+ */
+static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd, int gain)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
/* Allow enough full-sized skbs in flight to utilize end systems. */
cwnd += 3 * bbr_tso_segs_goal(sk);
@@ -368,6 +403,17 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
return cwnd;
}
+/* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
+static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
+{
+ u32 inflight;
+
+ inflight = bbr_bdp(sk, bw, gain);
+ inflight = bbr_quantization_budget(sk, inflight, gain);
+
+ return inflight;
+}
+
/* With pacing at lower layers, there's often less data "in the network" than
* "in flight". With TSQ and departure time pacing at lower layers (e.g. fq),
* we often have several skbs queued in the pacing layer with a pre-scheduled
@@ -401,6 +447,22 @@ static u32 bbr_packets_in_net_at_edt(struct sock *sk, u32 inflight_now)
return inflight_at_edt - interval_delivered;
}
+/* Find the cwnd increment based on estimate of ack aggregation */
+static u32 bbr_ack_aggregation_cwnd(struct sock *sk)
+{
+ u32 max_aggr_cwnd, aggr_cwnd = 0;
+
+ if (bbr_extra_acked_gain && bbr_full_bw_reached(sk)) {
+ max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us)
+ / BW_UNIT;
+ aggr_cwnd = (bbr_extra_acked_gain * bbr_extra_acked(sk))
+ >> BBR_SCALE;
+ aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd);
+ }
+
+ return aggr_cwnd;
+}
+
/* An optimization in BBR to reduce losses: On the first round of recovery, we
* follow the packet conservation principle: send P packets per P packets acked.
* After that, we slow-start and send at most 2*P packets per P packets acked.
@@ -461,8 +523,15 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
goto done;
+ target_cwnd = bbr_bdp(sk, bw, gain);
+
+ /* Increment the cwnd to account for excess ACKed data that seems
+ * due to aggregation (of data and/or ACKs) visible in the ACK stream.
+ */
+ target_cwnd += bbr_ack_aggregation_cwnd(sk);
+ target_cwnd = bbr_quantization_budget(sk, target_cwnd, gain);
+
/* If we're below target cwnd, slow start cwnd toward target cwnd. */
- target_cwnd = bbr_target_cwnd(sk, bw, gain);
if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */
cwnd = min(cwnd + acked, target_cwnd);
else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
@@ -503,14 +572,14 @@ static bool bbr_is_next_cycle_phase(struct sock *sk,
if (bbr->pacing_gain > BBR_UNIT)
return is_full_length &&
(rs->losses || /* perhaps pacing_gain*BDP won't fit */
- inflight >= bbr_target_cwnd(sk, bw, bbr->pacing_gain));
+ inflight >= bbr_inflight(sk, bw, bbr->pacing_gain));
/* A pacing_gain < 1.0 tries to drain extra queue we added if bw
* probing didn't find more bw. If inflight falls to match BDP then we
* estimate queue is drained; persisting would underutilize the pipe.
*/
return is_full_length ||
- inflight <= bbr_target_cwnd(sk, bw, BBR_UNIT);
+ inflight <= bbr_inflight(sk, bw, BBR_UNIT);
}
static void bbr_advance_cycle_phase(struct sock *sk)
@@ -727,6 +796,67 @@ static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
}
}
+/* Estimates the windowed max degree of ack aggregation.
+ * This is used to provision extra in-flight data to keep sending during
+ * inter-ACK silences.
+ *
+ * The degree of ACK aggregation is estimated as the extra data ACKed
+ * beyond what was expected.
+ *
+ * max_extra_acked = "maximum recent excess data ACKed beyond max_bw * interval"
+ * cwnd += max_extra_acked
+ *
+ * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms).
+ * Max filter is an approximate sliding window of 5-10 (packet timed) round
+ * trips.
+ */
+static void bbr_update_ack_aggregation(struct sock *sk,
+ const struct rate_sample *rs)
+{
+ u32 epoch_us, expected_acked, extra_acked;
+ struct bbr *bbr = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (!bbr_extra_acked_gain || rs->acked_sacked <= 0 ||
+ rs->delivered < 0 || rs->interval_us <= 0)
+ return;
+
+ if (bbr->round_start) {
+ bbr->extra_acked_win_rtts = min(0x1F,
+ bbr->extra_acked_win_rtts + 1);
+ if (bbr->extra_acked_win_rtts >= bbr_extra_acked_win_rtts) {
+ bbr->extra_acked_win_rtts = 0;
+ bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ?
+ 0 : 1;
+ bbr->extra_acked[bbr->extra_acked_win_idx] = 0;
+ }
+ }
+
+ /* Compute how many packets we expected to be delivered over epoch. */
+ epoch_us = tcp_stamp_us_delta(tp->delivered_mstamp,
+ bbr->ack_epoch_mstamp);
+ expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT;
+
+	/* Reset the aggregation epoch if the ACK rate is below the expected
+	 * rate, or if a significantly large number of ACKs has arrived since
+	 * the epoch started (i.e. the epoch is potentially quite old).
+ */
+ if (bbr->ack_epoch_acked <= expected_acked ||
+ (bbr->ack_epoch_acked + rs->acked_sacked >=
+ bbr_ack_epoch_acked_reset_thresh)) {
+ bbr->ack_epoch_acked = 0;
+ bbr->ack_epoch_mstamp = tp->delivered_mstamp;
+ expected_acked = 0;
+ }
+
+ /* Compute excess data delivered, beyond what was expected. */
+ bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
+ bbr->ack_epoch_acked + rs->acked_sacked);
+ extra_acked = bbr->ack_epoch_acked - expected_acked;
+ extra_acked = min(extra_acked, tp->snd_cwnd);
+ if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
+ bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
+}
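
To make the estimator concrete, a worked pass with assumed numbers: at a max_bw of 100 packets per 10 ms, a 10 ms epoch in which 150 packets are (S)ACKed leaves 50 packets of apparent aggregation, which feeds the windowed max and, clamped by snd_cwnd and bw * bbr_extra_acked_max_us, becomes extra cwnd:

	/* epoch_us        = 10000
	 * bbr_bw()        = 0.01 pkt/us  (100 pkts / 10 ms)
	 * expected_acked  = 0.01 * 10000 = 100 pkts
	 * ack_epoch_acked = 150 pkts
	 * extra_acked     = 150 - 100    = 50 pkts -> extra_acked[] max
	 */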
+
/* Estimate when the pipe is full, using the change in delivery rate: BBR
* estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
* at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
@@ -762,11 +892,11 @@ static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
bbr->mode = BBR_DRAIN; /* drain queue we created */
tcp_sk(sk)->snd_ssthresh =
- bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT);
+ bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
} /* fall through to check if in-flight is already small: */
if (bbr->mode == BBR_DRAIN &&
bbr_packets_in_net_at_edt(sk, tcp_packets_in_flight(tcp_sk(sk))) <=
- bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT))
+ bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */
}
@@ -881,6 +1011,7 @@ static void bbr_update_gains(struct sock *sk)
static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
{
bbr_update_bw(sk, rs);
+ bbr_update_ack_aggregation(sk, rs);
bbr_update_cycle_phase(sk, rs);
bbr_check_full_bw_reached(sk, rs);
bbr_check_drain(sk, rs);
@@ -932,6 +1063,13 @@ static void bbr_init(struct sock *sk)
bbr_reset_lt_bw_sampling(sk);
bbr_reset_startup_mode(sk);
+ bbr->ack_epoch_mstamp = tp->tcp_mstamp;
+ bbr->ack_epoch_acked = 0;
+ bbr->extra_acked_win_rtts = 0;
+ bbr->extra_acked_win_idx = 0;
+ bbr->extra_acked[0] = 0;
+ bbr->extra_acked[1] = 0;
+
cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 76858b14ebe9..7a027dec649b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -221,7 +221,7 @@ void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_incr_quickack(sk, max_quickacks);
- icsk->icsk_ack.pingpong = 0;
+ inet_csk_exit_pingpong_mode(sk);
icsk->icsk_ack.ato = TCP_ATO_MIN;
}
EXPORT_SYMBOL(tcp_enter_quickack_mode);
@@ -236,7 +236,7 @@ static bool tcp_in_quickack_mode(struct sock *sk)
const struct dst_entry *dst = __sk_dst_get(sk);
return (dst && dst_metric(dst, RTAX_QUICKACK)) ||
- (icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong);
+ (icsk->icsk_ack.quick && !inet_csk_in_pingpong_mode(sk));
}
static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
@@ -4094,7 +4094,7 @@ void tcp_fin(struct sock *sk)
case TCP_ESTABLISHED:
/* Move to CLOSE_WAIT */
tcp_set_state(sk, TCP_CLOSE_WAIT);
- inet_csk(sk)->icsk_ack.pingpong = 1;
+ inet_csk_enter_pingpong_mode(sk);
break;
case TCP_CLOSE_WAIT:
@@ -5889,7 +5889,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
return -1;
if (sk->sk_write_pending ||
icsk->icsk_accept_queue.rskq_defer_accept ||
- icsk->icsk_ack.pingpong) {
+ inet_csk_in_pingpong_mode(sk)) {
/* Save one ACK. Data will be ready after
* several ticks, if write_pending is set.
*
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index efc6fef692ff..662b034f1795 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2437,7 +2437,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
refcount_read(&sk->sk_refcnt), sk,
jiffies_to_clock_t(icsk->icsk_rto),
jiffies_to_clock_t(icsk->icsk_ack.ato),
- (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
+ (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
tp->snd_cwnd,
state == TCP_LISTEN ?
fastopenq->max_qlen :
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 12affb7864d9..182595e2d40f 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -479,43 +479,16 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
tcp_init_wl(newtp, treq->rcv_isn);
- newtp->srtt_us = 0;
- newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
- newicsk->icsk_rto = TCP_TIMEOUT_INIT;
newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
- newtp->packets_out = 0;
- newtp->retrans_out = 0;
- newtp->sacked_out = 0;
- newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
- newtp->tlp_high_seq = 0;
newtp->lsndtime = tcp_jiffies32;
newsk->sk_txhash = treq->txhash;
- newtp->last_oow_ack_time = 0;
newtp->total_retrans = req->num_retrans;
- /* So many TCP implementations out there (incorrectly) count the
- * initial SYN frame in their delayed-ACK and congestion control
- * algorithms that we must have the following bandaid to talk
- * efficiently to them. -DaveM
- */
- newtp->snd_cwnd = TCP_INIT_CWND;
- newtp->snd_cwnd_cnt = 0;
-
- /* There's a bubble in the pipe until at least the first ACK. */
- newtp->app_limited = ~0U;
-
tcp_init_xmit_timers(newsk);
newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
- newtp->rx_opt.saw_tstamp = 0;
-
- newtp->rx_opt.dsack = 0;
- newtp->rx_opt.num_sacks = 0;
-
- newtp->urg_data = 0;
-
if (sock_flag(newsk, SOCK_KEEPOPEN))
inet_csk_reset_keepalive_timer(newsk,
keepalive_time_when(newtp));
@@ -556,13 +529,6 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
tcp_ecn_openreq_child(newtp, req);
newtp->fastopen_req = NULL;
newtp->fastopen_rsk = NULL;
- newtp->syn_data_acked = 0;
- newtp->rack.mstamp = 0;
- newtp->rack.advanced = 0;
- newtp->rack.reo_wnd_steps = 1;
- newtp->rack.last_delivered = 0;
- newtp->rack.reo_wnd_persist = 0;
- newtp->rack.dsack_seen = 0;
__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 730bc44dbad9..96bdb8eae9bb 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -165,13 +165,16 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
if (tcp_packets_in_flight(tp) == 0)
tcp_ca_event(sk, CA_EVENT_TX_START);
- tp->lsndtime = now;
-
- /* If it is a reply for ato after last received
- * packet, enter pingpong mode.
+	/* If this is the first data packet sent in response to the
+	 * previously received data, and it is sent within ato of the
+	 * last received packet, increase the pingpong count.
*/
- if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
- icsk->icsk_ack.pingpong = 1;
+ if (before(tp->lsndtime, icsk->icsk_ack.lrcvtime) &&
+ (u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
+ inet_csk_inc_pingpong_cnt(sk);
+
+ tp->lsndtime = now;
}
/* Account for an ACK we sent. */
@@ -980,7 +983,6 @@ static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
{
struct tcp_sock *tp = tcp_sk(sk);
- skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
if (sk->sk_pacing_status != SK_PACING_NONE) {
unsigned long rate = sk->sk_pacing_rate;
@@ -1028,7 +1030,9 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
BUG_ON(!skb || !tcp_skb_pcount(skb));
tp = tcp_sk(sk);
-
+ prior_wstamp = tp->tcp_wstamp_ns;
+ tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
+ skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
if (clone_it) {
TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
- tp->snd_una;
@@ -1045,11 +1049,6 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
return -ENOBUFS;
}
- prior_wstamp = tp->tcp_wstamp_ns;
- tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
-
- skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
-
inet = inet_sk(sk);
tcb = TCP_SKB_CB(skb);
memset(&opts, 0, sizeof(opts));
@@ -2937,12 +2936,16 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}
+ /* To avoid taking spuriously low RTT samples based on a timestamp
+ * for a transmit that never happened, always mark EVER_RETRANS
+ */
+ TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
+
if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
TCP_SKB_CB(skb)->seq, segs, err);
if (likely(!err)) {
- TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
trace_tcp_retransmit_skb(sk, skb);
} else if (err != -EBUSY) {
NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
@@ -2963,13 +2966,12 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
#endif
TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
tp->retrans_out += tcp_skb_pcount(skb);
-
- /* Save stamp of the first retransmit. */
- if (!tp->retrans_stamp)
- tp->retrans_stamp = tcp_skb_timestamp(skb);
-
}
+ /* Save stamp of the first (attempted) retransmit. */
+ if (!tp->retrans_stamp)
+ tp->retrans_stamp = tcp_skb_timestamp(skb);
+
if (tp->undo_retrans < 0)
tp->undo_retrans = 0;
tp->undo_retrans += tcp_skb_pcount(skb);
@@ -3456,6 +3458,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
skb_trim(syn_data, copied);
space = copied;
}
+ skb_zcopy_set(syn_data, fo->uarg, NULL);
}
/* No more data pending in inet_wait_for_connect() */
if (space == fo->size)
@@ -3569,7 +3572,7 @@ void tcp_send_delayed_ack(struct sock *sk)
const struct tcp_sock *tp = tcp_sk(sk);
int max_ato = HZ / 2;
- if (icsk->icsk_ack.pingpong ||
+ if (inet_csk_in_pingpong_mode(sk) ||
(icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
max_ato = TCP_DELACK_MAX;
@@ -3750,7 +3753,7 @@ void tcp_send_probe0(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
- unsigned long probe_max;
+ unsigned long timeout;
int err;
err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
@@ -3762,26 +3765,18 @@ void tcp_send_probe0(struct sock *sk)
return;
}
+ icsk->icsk_probes_out++;
if (err <= 0) {
if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
icsk->icsk_backoff++;
- icsk->icsk_probes_out++;
- probe_max = TCP_RTO_MAX;
+ timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
} else {
/* If packet was not sent due to local congestion,
- * do not backoff and do not remember icsk_probes_out.
- * Let local senders to fight for local resources.
- *
- * Use accumulated backoff yet.
+		 * let senders fight for local resources conservatively.
*/
- if (!icsk->icsk_probes_out)
- icsk->icsk_probes_out = 1;
- probe_max = TCP_RESOURCE_PROBE_INTERVAL;
- }
- tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- tcp_probe0_when(sk, probe_max),
- TCP_RTO_MAX,
- NULL);
+ timeout = TCP_RESOURCE_PROBE_INTERVAL;
+ }
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX, NULL);
}
int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 71a29e9c0620..f0c86398e6a7 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -22,28 +22,14 @@
#include <linux/gfp.h>
#include <net/tcp.h>
-static u32 tcp_retransmit_stamp(const struct sock *sk)
-{
- u32 start_ts = tcp_sk(sk)->retrans_stamp;
-
- if (unlikely(!start_ts)) {
- struct sk_buff *head = tcp_rtx_queue_head(sk);
-
- if (!head)
- return 0;
- start_ts = tcp_skb_timestamp(head);
- }
- return start_ts;
-}
-
static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
u32 elapsed, start_ts;
s32 remaining;
- start_ts = tcp_retransmit_stamp(sk);
- if (!icsk->icsk_user_timeout || !start_ts)
+ start_ts = tcp_sk(sk)->retrans_stamp;
+ if (!icsk->icsk_user_timeout)
return icsk->icsk_rto;
elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
remaining = icsk->icsk_user_timeout - elapsed;
@@ -173,7 +159,20 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
-
+static unsigned int tcp_model_timeout(struct sock *sk,
+ unsigned int boundary,
+ unsigned int rto_base)
+{
+ unsigned int linear_backoff_thresh, timeout;
+
+ linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
+ if (boundary <= linear_backoff_thresh)
+ timeout = ((2 << boundary) - 1) * rto_base;
+ else
+ timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
+ (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
+ return jiffies_to_msecs(timeout);
+}
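
tcp_model_timeout() models the total time consumed by 'boundary' exponential backoffs starting from rto_base, with each retry capped at TCP_RTO_MAX. Since the result is returned in msecs, a worked example is HZ-independent:

	/* With rto_base = TCP_RTO_MIN (200 ms) and TCP_RTO_MAX = 120 s,
	 * linear_backoff_thresh = ilog2(600) = 9:
	 *
	 *   boundary = 3  -> ((2 << 3) - 1) * 200 ms             =   3 s
	 *   boundary = 11 -> ((2 << 9) - 1) * 200 ms + 2 * 120 s = 444.6 s
	 */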
/**
* retransmits_timed_out() - returns true if this connection has timed out
* @sk: The current socket
@@ -191,26 +190,15 @@ static bool retransmits_timed_out(struct sock *sk,
unsigned int boundary,
unsigned int timeout)
{
- const unsigned int rto_base = TCP_RTO_MIN;
- unsigned int linear_backoff_thresh, start_ts;
+ unsigned int start_ts;
if (!inet_csk(sk)->icsk_retransmits)
return false;
- start_ts = tcp_retransmit_stamp(sk);
- if (!start_ts)
- return false;
-
- if (likely(timeout == 0)) {
- linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
+ start_ts = tcp_sk(sk)->retrans_stamp;
+ if (likely(timeout == 0))
+ timeout = tcp_model_timeout(sk, boundary, TCP_RTO_MIN);
- if (boundary <= linear_backoff_thresh)
- timeout = ((2 << boundary) - 1) * rto_base;
- else
- timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
- (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
- timeout = jiffies_to_msecs(timeout);
- }
return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
}
@@ -289,14 +277,14 @@ void tcp_delack_timer_handler(struct sock *sk)
icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
if (inet_csk_ack_scheduled(sk)) {
- if (!icsk->icsk_ack.pingpong) {
+ if (!inet_csk_in_pingpong_mode(sk)) {
/* Delayed ACK missed: inflate ATO. */
icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
} else {
/* Delayed ACK missed: leave pingpong mode and
* deflate ATO.
*/
- icsk->icsk_ack.pingpong = 0;
+ inet_csk_exit_pingpong_mode(sk);
icsk->icsk_ack.ato = TCP_ATO_MIN;
}
tcp_mstamp_refresh(tcp_sk(sk));
@@ -345,7 +333,6 @@ static void tcp_probe_timer(struct sock *sk)
struct sk_buff *skb = tcp_send_head(sk);
struct tcp_sock *tp = tcp_sk(sk);
int max_probes;
- u32 start_ts;
if (tp->packets_out || !skb) {
icsk->icsk_probes_out = 0;
@@ -360,12 +347,13 @@ static void tcp_probe_timer(struct sock *sk)
* corresponding system limit. We also implement similar policy when
* we use RTO to probe window in tcp_retransmit_timer().
*/
- start_ts = tcp_skb_timestamp(skb);
- if (!start_ts)
- skb->skb_mstamp_ns = tp->tcp_clock_cache;
- else if (icsk->icsk_user_timeout &&
- (s32)(tcp_time_stamp(tp) - start_ts) > icsk->icsk_user_timeout)
- goto abort;
+ if (icsk->icsk_user_timeout) {
+ u32 elapsed = tcp_model_timeout(sk, icsk->icsk_probes_out,
+ tcp_probe0_base(sk));
+
+ if (elapsed >= icsk->icsk_user_timeout)
+ goto abort;
+ }
max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
if (sock_flag(sk, SOCK_DEAD)) {
@@ -395,6 +383,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
int max_retries = icsk->icsk_syn_retries ? :
sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
+ struct tcp_sock *tp = tcp_sk(sk);
struct request_sock *req;
req = tcp_sk(sk)->fastopen_rsk;
@@ -412,6 +401,8 @@ static void tcp_fastopen_synack_timer(struct sock *sk)
inet_rtx_syn_ack(sk, req);
req->num_timeout++;
icsk->icsk_retransmits++;
+ if (!tp->retrans_stamp)
+ tp->retrans_stamp = tcp_time_stamp(tp);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}
@@ -443,10 +434,8 @@ void tcp_retransmit_timer(struct sock *sk)
*/
return;
}
- if (!tp->packets_out)
- goto out;
-
- WARN_ON(tcp_rtx_queue_empty(sk));
+ if (!tp->packets_out || WARN_ON_ONCE(tcp_rtx_queue_empty(sk)))
+ return;
tp->tlp_high_seq = 0;
@@ -511,14 +500,13 @@ void tcp_retransmit_timer(struct sock *sk)
tcp_enter_loss(sk);
+ icsk->icsk_retransmits++;
if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
/* Retransmission failed because of local congestion,
- * do not backoff.
+		 * let senders fight for local resources conservatively.
*/
- if (!icsk->icsk_retransmits)
- icsk->icsk_retransmits = 1;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
+ TCP_RESOURCE_PROBE_INTERVAL,
TCP_RTO_MAX);
goto out;
}
@@ -539,7 +527,6 @@ void tcp_retransmit_timer(struct sock *sk)
* the 120 second clamps though!
*/
icsk->icsk_backoff++;
- icsk->icsk_retransmits++;
out_reset_timer:
/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index be8b5b2157d8..e93cc0379201 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -21,18 +21,9 @@ int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
goto error;
if (cfg->bind_ifindex) {
- struct net_device *dev;
-
- dev = dev_get_by_index(net, cfg->bind_ifindex);
- if (!dev) {
- err = -ENODEV;
- goto error;
- }
-
- err = kernel_setsockopt(sock, SOL_SOCKET, SO_BINDTODEVICE,
- dev->name, strlen(dev->name) + 1);
- dev_put(dev);
-
+ err = kernel_setsockopt(sock, SOL_SOCKET, SO_BINDTOIFINDEX,
+ (void *)&cfg->bind_ifindex,
+ sizeof(cfg->bind_ifindex));
if (err < 0)
goto error;
}
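
SO_BINDTOIFINDEX takes the interface index directly, so the kernel no longer has to look up and hold a net_device just to replay SO_BINDTODEVICE with its name. The equivalent userspace call, sketched with an assumed device name:

	int ifindex = if_nametoindex("eth0");	/* assumed name */

	setsockopt(fd, SOL_SOCKET, SO_BINDTOIFINDEX,
		   &ifindex, sizeof(ifindex));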
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 84c358804355..dcb1d434f7da 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -597,6 +597,43 @@ static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
};
+static int inet6_netconf_valid_get_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf get request");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(struct netconfmsg), tb,
+ NETCONFA_MAX, devconf_ipv6_policy, extack);
+
+ err = nlmsg_parse_strict(nlh, sizeof(struct netconfmsg), tb,
+ NETCONFA_MAX, devconf_ipv6_policy, extack);
+ if (err)
+ return err;
+
+ for (i = 0; i <= NETCONFA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case NETCONFA_IFINDEX:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
@@ -605,14 +642,12 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
struct nlattr *tb[NETCONFA_MAX+1];
struct inet6_dev *in6_dev = NULL;
struct net_device *dev = NULL;
- struct netconfmsg *ncm;
struct sk_buff *skb;
struct ipv6_devconf *devconf;
int ifindex;
int err;
- err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
- devconf_ipv6_policy, extack);
+ err = inet6_netconf_valid_get_req(in_skb, nlh, tb, extack);
if (err < 0)
return err;
@@ -5181,6 +5216,52 @@ static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
return inet6_dump_addr(skb, cb, type);
}
+static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct ifaddrmsg *ifm;
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid header for get address request");
+ return -EINVAL;
+ }
+
+ ifm = nlmsg_data(nlh);
+ if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get address request");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX,
+ ifa_ipv6_policy, extack);
+
+ err = nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
+ ifa_ipv6_policy, extack);
+ if (err)
+ return err;
+
+ for (i = 0; i <= IFA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case IFA_TARGET_NETNSID:
+ case IFA_ADDRESS:
+ case IFA_LOCAL:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get address request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -5201,8 +5282,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct sk_buff *skb;
int err;
- err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
- extack);
+ err = inet6_rtm_valid_getaddr_req(in_skb, nlh, tb, extack);
if (err < 0)
return err;
@@ -6824,6 +6904,11 @@ static int __net_init addrconf_init_net(struct net *net)
if (!dflt)
goto err_alloc_dflt;
+ if (sysctl_devconf_inherit_init_net == 1 && !net_eq(net, &init_net)) {
+ memcpy(all, init_net.ipv6.devconf_all, sizeof(ipv6_devconf));
+ memcpy(dflt, init_net.ipv6.devconf_dflt, sizeof(ipv6_devconf_dflt));
+ }
+
/* these will be inherited by all namespaces */
dflt->autoconf = ipv6_defaults.autoconf;
dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
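netlink_strict_get_check() in the validators above keys off a per-socket opt-in, so old
binaries keep the permissive parse while updated ones get hard errors for malformed GET
requests. A user-space sketch of enabling it (NETLINK_GET_STRICT_CHK needs a 4.20+
kernel):

    #include <linux/netlink.h>
    #include <sys/socket.h>

    #ifndef SOL_NETLINK
    #define SOL_NETLINK 270
    #endif

    /* Opt this netlink socket in to strict GET validation; with it set,
     * validators like inet6_netconf_valid_get_req() reject stray header
     * fields and unknown attributes instead of silently ignoring them. */
    static int enable_strict_checking(int nlfd)
    {
    	int one = 1;

    	return setsockopt(nlfd, SOL_NETLINK, NETLINK_GET_STRICT_CHK,
    			  &one, sizeof(one));
    }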
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 0d1ee82ee55b..d43d076c98f5 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -523,6 +523,50 @@ static inline int ip6addrlbl_msgsize(void)
+ nla_total_size(4); /* IFAL_LABEL */
}
+static int ip6addrlbl_valid_get_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct ifaddrlblmsg *ifal;
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifal))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid header for addrlabel get request");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX,
+ ifal_policy, extack);
+
+ ifal = nlmsg_data(nlh);
+ if (ifal->__ifal_reserved || ifal->ifal_flags || ifal->ifal_seq) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for addrlabel get request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(*ifal), tb, IFAL_MAX,
+ ifal_policy, extack);
+ if (err)
+ return err;
+
+ for (i = 0; i <= IFAL_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case IFAL_ADDRESS:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in addrlabel get request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -535,8 +579,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct ip6addrlbl_entry *p;
struct sk_buff *skb;
- err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy,
- extack);
+ err = ip6addrlbl_valid_get_req(in_skb, nlh, tb, extack);
if (err < 0)
return err;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 4416368dbd49..e081e69d534e 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -524,7 +524,7 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
return PACKET_REJECT;
}
-static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
+static int ip6erspan_rcv(struct sk_buff *skb,
struct tnl_ptk_info *tpi)
{
struct erspan_base_hdr *ershdr;
@@ -607,7 +607,7 @@ static int gre_rcv(struct sk_buff *skb)
if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
tpi.proto == htons(ETH_P_ERSPAN2))) {
- if (ip6erspan_rcv(skb, hdr_len, &tpi) == PACKET_RCVD)
+ if (ip6erspan_rcv(skb, &tpi) == PACKET_RCVD)
return 0;
goto out;
}
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index ad1a9ccd4b44..25430c991cea 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -32,18 +32,9 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
goto error;
}
if (cfg->bind_ifindex) {
- struct net_device *dev;
-
- dev = dev_get_by_index(net, cfg->bind_ifindex);
- if (!dev) {
- err = -ENODEV;
- goto error;
- }
-
- err = kernel_setsockopt(sock, SOL_SOCKET, SO_BINDTODEVICE,
- dev->name, strlen(dev->name) + 1);
- dev_put(dev);
-
+ err = kernel_setsockopt(sock, SOL_SOCKET, SO_BINDTOIFINDEX,
+ (void *)&cfg->bind_ifindex,
+ sizeof(cfg->bind_ifindex));
if (err < 0)
goto error;
}
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 21f6deb2aec9..42f3f5cd349f 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -940,6 +940,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
{
return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
}
+EXPORT_SYMBOL(ipv6_dev_mc_inc);
/*
* device multicast group del
@@ -987,6 +988,7 @@ int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
return err;
}
+EXPORT_SYMBOL(ipv6_dev_mc_dec);
/*
* check if the interface/address pair is valid
diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c
index 9405b04eecc6..55e2ac179f28 100644
--- a/net/ipv6/mcast_snoop.c
+++ b/net/ipv6/mcast_snoop.c
@@ -41,6 +41,8 @@ static int ipv6_mc_check_ip6hdr(struct sk_buff *skb)
if (skb->len < len || len <= offset)
return -EINVAL;
+ skb_set_transport_header(skb, offset);
+
return 0;
}
@@ -77,27 +79,27 @@ static int ipv6_mc_check_mld_reportv2(struct sk_buff *skb)
len += sizeof(struct mld2_report);
- return pskb_may_pull(skb, len) ? 0 : -EINVAL;
+ return ipv6_mc_may_pull(skb, len) ? 0 : -EINVAL;
}
static int ipv6_mc_check_mld_query(struct sk_buff *skb)
{
+ unsigned int transport_len = ipv6_transport_len(skb);
struct mld_msg *mld;
- unsigned int len = skb_transport_offset(skb);
+ unsigned int len;
/* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
return -EINVAL;
- len += sizeof(struct mld_msg);
- if (skb->len < len)
- return -EINVAL;
-
/* MLDv1? */
- if (skb->len != len) {
+ if (transport_len != sizeof(struct mld_msg)) {
/* or MLDv2? */
- len += sizeof(struct mld2_query) - sizeof(struct mld_msg);
- if (skb->len < len || !pskb_may_pull(skb, len))
+ if (transport_len < sizeof(struct mld2_query))
+ return -EINVAL;
+
+ len = skb_transport_offset(skb) + sizeof(struct mld2_query);
+ if (!ipv6_mc_may_pull(skb, len))
return -EINVAL;
}
@@ -115,7 +117,13 @@ static int ipv6_mc_check_mld_query(struct sk_buff *skb)
static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
{
- struct mld_msg *mld = (struct mld_msg *)skb_transport_header(skb);
+ unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
+ struct mld_msg *mld;
+
+ if (!ipv6_mc_may_pull(skb, len))
+ return -EINVAL;
+
+ mld = (struct mld_msg *)skb_transport_header(skb);
switch (mld->mld_type) {
case ICMPV6_MGM_REDUCTION:
@@ -136,49 +144,30 @@ static inline __sum16 ipv6_mc_validate_checksum(struct sk_buff *skb)
return skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo);
}
-static int __ipv6_mc_check_mld(struct sk_buff *skb,
- struct sk_buff **skb_trimmed)
-
+int ipv6_mc_check_icmpv6(struct sk_buff *skb)
{
- struct sk_buff *skb_chk = NULL;
- unsigned int transport_len;
- unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
- int ret = -EINVAL;
+ unsigned int len = skb_transport_offset(skb) + sizeof(struct icmp6hdr);
+ unsigned int transport_len = ipv6_transport_len(skb);
+ struct sk_buff *skb_chk;
- transport_len = ntohs(ipv6_hdr(skb)->payload_len);
- transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr);
+ if (!ipv6_mc_may_pull(skb, len))
+ return -EINVAL;
skb_chk = skb_checksum_trimmed(skb, transport_len,
ipv6_mc_validate_checksum);
if (!skb_chk)
- goto err;
-
- if (!pskb_may_pull(skb_chk, len))
- goto err;
-
- ret = ipv6_mc_check_mld_msg(skb_chk);
- if (ret)
- goto err;
-
- if (skb_trimmed)
- *skb_trimmed = skb_chk;
- /* free now unneeded clone */
- else if (skb_chk != skb)
- kfree_skb(skb_chk);
-
- ret = 0;
+ return -EINVAL;
-err:
- if (ret && skb_chk && skb_chk != skb)
+ if (skb_chk != skb)
kfree_skb(skb_chk);
- return ret;
+ return 0;
}
+EXPORT_SYMBOL(ipv6_mc_check_icmpv6);
/**
* ipv6_mc_check_mld - checks whether this is a sane MLD packet
* @skb: the skb to validate
- * @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional)
*
* Checks whether an IPv6 packet is a valid MLD packet. If so sets
* skb transport header accordingly and returns zero.
@@ -188,18 +177,10 @@ err:
* -ENOMSG: IP header validation succeeded but it is not an MLD packet.
* -ENOMEM: A memory allocation failure happened.
*
- * Optionally, an skb pointer might be provided via skb_trimmed (or set it
- * to NULL): After parsing an MLD packet successfully it will point to
- * an skb which has its tail aligned to the IP packet end. This might
- * either be the originally provided skb or a trimmed, cloned version if
- * the skb frame had data beyond the IP packet. A cloned skb allows us
- * to leave the original skb and its full frame unchanged (which might be
- * desirable for layer 2 frame jugglers).
- *
* Caller needs to set the skb network header and free any returned skb if it
* differs from the provided skb.
*/
-int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
+int ipv6_mc_check_mld(struct sk_buff *skb)
{
int ret;
@@ -211,6 +192,10 @@ int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
if (ret < 0)
return ret;
- return __ipv6_mc_check_mld(skb, skb_trimmed);
+ ret = ipv6_mc_check_icmpv6(skb);
+ if (ret < 0)
+ return ret;
+
+ return ipv6_mc_check_mld_msg(skb);
}
EXPORT_SYMBOL(ipv6_mc_check_mld);
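The ipv6_transport_len()/ipv6_mc_may_pull() helpers this file now leans on are introduced
elsewhere in the series; roughly, they bound every pull by the length the IPv6 header
actually claims for the transport payload. A sketch of their logic (kernel context, not
verbatim source):

    /* The transport length is whatever the IPv6 header claims for the
     * payload, minus any extension headers sitting between the fixed
     * header and the transport header. */
    static unsigned int ipv6_transport_len_sketch(const struct sk_buff *skb)
    {
    	return ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr) -
    	       skb_network_offset(skb) - skb_transport_offset(skb);
    }

    /* Refuse pulls that would run past the claimed transport payload,
     * then linearize the requested bytes if they are paged. */
    static int ipv6_mc_may_pull_sketch(struct sk_buff *skb, unsigned int len)
    {
    	if (skb_transport_offset(skb) + ipv6_transport_len_sketch(skb) < len)
    		return 0;
    	return pskb_may_pull(skb, len);
    }

Setting the transport header as early as ipv6_mc_check_ip6hdr() is what makes these
offset-based checks usable throughout the rest of the file.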
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 181da2c40f9a..cb1b4772dac0 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -136,6 +136,9 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
}
#endif
+static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev);
+
static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
@@ -177,9 +180,10 @@ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
const struct frag_hdr *fhdr, int nhoff)
{
- struct sk_buff *prev, *next;
unsigned int payload_len;
- int offset, end;
+ struct net_device *dev;
+ struct sk_buff *prev;
+ int offset, end, err;
u8 ecn;
if (fq->q.flags & INET_FRAG_COMPLETE) {
@@ -254,55 +258,18 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
goto err;
}
- /* Find out which fragments are in front and at the back of us
- * in the chain of fragments so far. We must know where to put
- * this fragment, right?
- */
- prev = fq->q.fragments_tail;
- if (!prev || prev->ip_defrag_offset < offset) {
- next = NULL;
- goto found;
- }
- prev = NULL;
- for (next = fq->q.fragments; next != NULL; next = next->next) {
- if (next->ip_defrag_offset >= offset)
- break; /* bingo! */
- prev = next;
- }
-
-found:
- /* RFC5722, Section 4:
- * When reassembling an IPv6 datagram, if
- * one or more its constituent fragments is determined to be an
- * overlapping fragment, the entire datagram (and any constituent
- * fragments, including those not yet received) MUST be silently
- * discarded.
- */
-
- /* Check for overlap with preceding fragment. */
- if (prev &&
- (prev->ip_defrag_offset + prev->len) > offset)
- goto discard_fq;
-
- /* Look for overlap with succeeding segment. */
- if (next && next->ip_defrag_offset < end)
- goto discard_fq;
-
- /* Note : skb->ip_defrag_offset and skb->dev share the same location */
- if (skb->dev)
- fq->iif = skb->dev->ifindex;
+ /* Note : skb->rbnode and skb->dev share the same location. */
+ dev = skb->dev;
/* Makes sure compiler wont do silly aliasing games */
barrier();
- skb->ip_defrag_offset = offset;
- /* Insert this fragment in the chain of fragments. */
- skb->next = next;
- if (!next)
- fq->q.fragments_tail = skb;
- if (prev)
- prev->next = skb;
- else
- fq->q.fragments = skb;
+ prev = fq->q.fragments_tail;
+ err = inet_frag_queue_insert(&fq->q, skb, offset, end);
+ if (err)
+ goto insert_error;
+
+ if (dev)
+ fq->iif = dev->ifindex;
fq->q.stamp = skb->tstamp;
fq->q.meat += skb->len;
@@ -319,11 +286,25 @@ found:
fq->q.flags |= INET_FRAG_FIRST_IN;
}
- return 0;
+ if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+ fq->q.meat == fq->q.len) {
+ unsigned long orefdst = skb->_skb_refdst;
+
+ skb->_skb_refdst = 0UL;
+ err = nf_ct_frag6_reasm(fq, skb, prev, dev);
+ skb->_skb_refdst = orefdst;
+ return err;
+ }
+
+ skb_dst_drop(skb);
+ return -EINPROGRESS;
-discard_fq:
+insert_error:
+ if (err == IPFRAG_DUP)
+ goto err;
inet_frag_kill(&fq->q);
err:
+ skb_dst_drop(skb);
return -EINVAL;
}
@@ -333,147 +314,67 @@ err:
* It is called with locked fq, and caller must check that
* queue is eligible for reassembly i.e. it is not COMPLETE,
* the last and the first frames arrived and all the bits are here.
- *
- * returns true if *prev skb has been transformed into the reassembled
- * skb, false otherwise.
*/
-static bool
-nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
+static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev)
{
- struct sk_buff *fp, *head = fq->q.fragments;
- int payload_len, delta;
+ void *reasm_data;
+ int payload_len;
u8 ecn;
inet_frag_kill(&fq->q);
- WARN_ON(head == NULL);
- WARN_ON(head->ip_defrag_offset != 0);
-
ecn = ip_frag_ecn_table[fq->ecn];
if (unlikely(ecn == 0xff))
- return false;
+ goto err;
+
+ reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
+ if (!reasm_data)
+ goto err;
- /* Unfragmented part is taken from the first segment. */
- payload_len = ((head->data - skb_network_header(head)) -
+ payload_len = ((skb->data - skb_network_header(skb)) -
sizeof(struct ipv6hdr) + fq->q.len -
sizeof(struct frag_hdr));
if (payload_len > IPV6_MAXPLEN) {
net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
payload_len);
- return false;
- }
-
- delta = - head->truesize;
-
- /* Head of list must not be cloned. */
- if (skb_unclone(head, GFP_ATOMIC))
- return false;
-
- delta += head->truesize;
- if (delta)
- add_frag_mem_limit(fq->q.net, delta);
-
- /* If the first fragment is fragmented itself, we split
- * it to two chunks: the first with data and paged part
- * and the second, holding only fragments. */
- if (skb_has_frag_list(head)) {
- struct sk_buff *clone;
- int i, plen = 0;
-
- clone = alloc_skb(0, GFP_ATOMIC);
- if (clone == NULL)
- return false;
-
- clone->next = head->next;
- head->next = clone;
- skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
- skb_frag_list_init(head);
- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
- plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
- clone->len = clone->data_len = head->data_len - plen;
- head->data_len -= clone->len;
- head->len -= clone->len;
- clone->csum = 0;
- clone->ip_summed = head->ip_summed;
-
- add_frag_mem_limit(fq->q.net, clone->truesize);
- }
-
- /* morph head into last received skb: prev.
- *
- * This allows callers of ipv6 conntrack defrag to continue
- * to use the last skb(frag) passed into the reasm engine.
- * The last skb frag 'silently' turns into the full reassembled skb.
- *
- * Since prev is also part of q->fragments we have to clone it first.
- */
- if (head != prev) {
- struct sk_buff *iter;
-
- fp = skb_clone(prev, GFP_ATOMIC);
- if (!fp)
- return false;
-
- fp->next = prev->next;
-
- iter = head;
- while (iter) {
- if (iter->next == prev) {
- iter->next = fp;
- break;
- }
- iter = iter->next;
- }
-
- skb_morph(prev, head);
- prev->next = head->next;
- consume_skb(head);
- head = prev;
+ goto err;
}
/* We have to remove fragment header from datagram and to relocate
* header in order to calculate ICV correctly. */
- skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];
- memmove(head->head + sizeof(struct frag_hdr), head->head,
- (head->data - head->head) - sizeof(struct frag_hdr));
- head->mac_header += sizeof(struct frag_hdr);
- head->network_header += sizeof(struct frag_hdr);
-
- skb_shinfo(head)->frag_list = head->next;
- skb_reset_transport_header(head);
- skb_push(head, head->data - skb_network_header(head));
-
- for (fp = head->next; fp; fp = fp->next) {
- head->data_len += fp->len;
- head->len += fp->len;
- if (head->ip_summed != fp->ip_summed)
- head->ip_summed = CHECKSUM_NONE;
- else if (head->ip_summed == CHECKSUM_COMPLETE)
- head->csum = csum_add(head->csum, fp->csum);
- head->truesize += fp->truesize;
- fp->sk = NULL;
- }
- sub_frag_mem_limit(fq->q.net, head->truesize);
+ skb_network_header(skb)[fq->nhoffset] = skb_transport_header(skb)[0];
+ memmove(skb->head + sizeof(struct frag_hdr), skb->head,
+ (skb->data - skb->head) - sizeof(struct frag_hdr));
+ skb->mac_header += sizeof(struct frag_hdr);
+ skb->network_header += sizeof(struct frag_hdr);
+
+ skb_reset_transport_header(skb);
+
+ inet_frag_reasm_finish(&fq->q, skb, reasm_data);
- head->ignore_df = 1;
- skb_mark_not_on_list(head);
- head->dev = dev;
- head->tstamp = fq->q.stamp;
- ipv6_hdr(head)->payload_len = htons(payload_len);
- ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
- IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
+ skb->ignore_df = 1;
+ skb->dev = dev;
+ ipv6_hdr(skb)->payload_len = htons(payload_len);
+ ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
+ IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
/* Yes, and fold redundant checksum back. 8) */
- if (head->ip_summed == CHECKSUM_COMPLETE)
- head->csum = csum_partial(skb_network_header(head),
- skb_network_header_len(head),
- head->csum);
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_partial(skb_network_header(skb),
+ skb_network_header_len(skb),
+ skb->csum);
fq->q.fragments = NULL;
fq->q.rb_fragments = RB_ROOT;
fq->q.fragments_tail = NULL;
+ fq->q.last_run_head = NULL;
- return true;
+ return 0;
+
+err:
+ inet_frag_kill(&fq->q);
+ return -EINVAL;
}
/*
@@ -542,7 +443,6 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
{
u16 savethdr = skb->transport_header;
- struct net_device *dev = skb->dev;
int fhoff, nhoff, ret;
struct frag_hdr *fhdr;
struct frag_queue *fq;
@@ -565,10 +465,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
hdr = ipv6_hdr(skb);
fhdr = (struct frag_hdr *)skb_transport_header(skb);
- if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
- fhdr->frag_off & htons(IP6_MF))
- return -EINVAL;
-
skb_orphan(skb);
fq = fq_find(net, fhdr->identification, user, hdr,
skb->dev ? skb->dev->ifindex : 0);
@@ -580,31 +476,17 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
spin_lock_bh(&fq->q.lock);
ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
- if (ret < 0) {
- if (ret == -EPROTO) {
- skb->transport_header = savethdr;
- ret = 0;
- }
- goto out_unlock;
+ if (ret == -EPROTO) {
+ skb->transport_header = savethdr;
+ ret = 0;
}
/* after queue has assumed skb ownership, only 0 or -EINPROGRESS
* must be returned.
*/
- ret = -EINPROGRESS;
- if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
- fq->q.meat == fq->q.len) {
- unsigned long orefdst = skb->_skb_refdst;
-
- skb->_skb_refdst = 0UL;
- if (nf_ct_frag6_reasm(fq, skb, dev))
- ret = 0;
- skb->_skb_refdst = orefdst;
- } else {
- skb_dst_drop(skb);
- }
+ if (ret)
+ ret = -EINPROGRESS;
-out_unlock:
spin_unlock_bh(&fq->q.lock);
inet_frag_put(&fq->q);
return ret;
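Both reassembly conversions in this series (here and in net/ipv6/reassembly.c below)
follow the same caller pattern around the shared rbtree fragment-queue API from
net/ipv4/inet_fragment.c. A condensed sketch of that contract (kernel context; error
handling simplified):

    /* inet_frag_queue_insert() places the fragment in the per-queue rbtree
     * and reports exact duplicates and RFC 5722 overlaps; once all bytes
     * have arrived, reassembly runs in two steps on the current skb. */
    static int queue_fragment_sketch(struct inet_frag_queue *q,
    				 struct sk_buff *skb, int offset, int end)
    {
    	struct sk_buff *prev_tail = q->fragments_tail;
    	int err;

    	err = inet_frag_queue_insert(q, skb, offset, end);
    	if (err)
    		return -EINVAL;	/* dup: drop skb; overlap: kill the queue */

    	if (q->flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
    	    q->meat == q->len) {
    		void *reasm_data = inet_frag_reasm_prepare(q, skb, prev_tail);

    		if (!reasm_data)
    			return -ENOMEM;
    		inet_frag_reasm_finish(q, skb, reasm_data);
    		return 0;	/* skb is now the reassembled datagram */
    	}
    	return -EINPROGRESS;	/* queue took ownership of skb */
    }

This is why the open-coded prev/next list walk and the overlap checks disappear from both
files: the shared insert helper enforces them once.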
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index 23022447eb49..9c914db44bec 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -225,7 +225,7 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
skb->len - hdrlen, 0));
}
- nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
+ nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip))
return 0;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 36a3d8dc61f5..24264d0a4b85 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -69,8 +69,8 @@ static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
static struct inet_frags ip6_frags;
-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
- struct net_device *dev);
+static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev);
static void ip6_frag_expire(struct timer_list *t)
{
@@ -111,21 +111,26 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
struct frag_hdr *fhdr, int nhoff,
u32 *prob_offset)
{
- struct sk_buff *prev, *next;
- struct net_device *dev;
- int offset, end, fragsize;
struct net *net = dev_net(skb_dst(skb)->dev);
+ int offset, end, fragsize;
+ struct sk_buff *prev_tail;
+ struct net_device *dev;
+ int err = -ENOENT;
u8 ecn;
if (fq->q.flags & INET_FRAG_COMPLETE)
goto err;
+ err = -EINVAL;
offset = ntohs(fhdr->frag_off) & ~0x7;
end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
if ((unsigned int)end > IPV6_MAXPLEN) {
*prob_offset = (u8 *)&fhdr->frag_off - skb_network_header(skb);
+ /* note that if prob_offset is set, the skb is freed elsewhere,
+ * we do not free it here.
+ */
return -1;
}
@@ -170,62 +175,27 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
if (end == offset)
goto discard_fq;
+ err = -ENOMEM;
/* Point into the IP datagram 'data' part. */
if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
goto discard_fq;
- if (pskb_trim_rcsum(skb, end - offset))
+ err = pskb_trim_rcsum(skb, end - offset);
+ if (err)
goto discard_fq;
- /* Find out which fragments are in front and at the back of us
- * in the chain of fragments so far. We must know where to put
- * this fragment, right?
- */
- prev = fq->q.fragments_tail;
- if (!prev || prev->ip_defrag_offset < offset) {
- next = NULL;
- goto found;
- }
- prev = NULL;
- for (next = fq->q.fragments; next != NULL; next = next->next) {
- if (next->ip_defrag_offset >= offset)
- break; /* bingo! */
- prev = next;
- }
-
-found:
- /* RFC5722, Section 4, amended by Errata ID : 3089
- * When reassembling an IPv6 datagram, if
- * one or more its constituent fragments is determined to be an
- * overlapping fragment, the entire datagram (and any constituent
- * fragments) MUST be silently discarded.
- */
-
- /* Check for overlap with preceding fragment. */
- if (prev &&
- (prev->ip_defrag_offset + prev->len) > offset)
- goto discard_fq;
-
- /* Look for overlap with succeeding segment. */
- if (next && next->ip_defrag_offset < end)
- goto discard_fq;
-
- /* Note : skb->ip_defrag_offset and skb->sk share the same location */
+ /* Note : skb->rbnode and skb->dev share the same location. */
dev = skb->dev;
- if (dev)
- fq->iif = dev->ifindex;
/* Makes sure compiler wont do silly aliasing games */
barrier();
- skb->ip_defrag_offset = offset;
- /* Insert this fragment in the chain of fragments. */
- skb->next = next;
- if (!next)
- fq->q.fragments_tail = skb;
- if (prev)
- prev->next = skb;
- else
- fq->q.fragments = skb;
+ prev_tail = fq->q.fragments_tail;
+ err = inet_frag_queue_insert(&fq->q, skb, offset, end);
+ if (err)
+ goto insert_error;
+
+ if (dev)
+ fq->iif = dev->ifindex;
fq->q.stamp = skb->tstamp;
fq->q.meat += skb->len;
@@ -246,44 +216,48 @@ found:
if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
fq->q.meat == fq->q.len) {
- int res;
unsigned long orefdst = skb->_skb_refdst;
skb->_skb_refdst = 0UL;
- res = ip6_frag_reasm(fq, prev, dev);
+ err = ip6_frag_reasm(fq, skb, prev_tail, dev);
skb->_skb_refdst = orefdst;
- return res;
+ return err;
}
skb_dst_drop(skb);
- return -1;
+ return -EINPROGRESS;
+insert_error:
+ if (err == IPFRAG_DUP) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+ err = -EINVAL;
+ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_REASM_OVERLAPS);
discard_fq:
inet_frag_kill(&fq->q);
-err:
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_REASMFAILS);
+err:
kfree_skb(skb);
- return -1;
+ return err;
}
/*
* Check if this packet is complete.
- * Returns NULL on failure by any reason, and pointer
- * to current nexthdr field in reassembled frame.
*
* It is called with locked fq, and caller must check that
* queue is eligible for reassembly i.e. it is not COMPLETE,
* the last and the first frames arrived and all the bits are here.
*/
-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
- struct net_device *dev)
+static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev)
{
struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
- struct sk_buff *fp, *head = fq->q.fragments;
- int payload_len, delta;
unsigned int nhoff;
- int sum_truesize;
+ void *reasm_data;
+ int payload_len;
u8 ecn;
inet_frag_kill(&fq->q);
@@ -292,121 +266,40 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
if (unlikely(ecn == 0xff))
goto out_fail;
- /* Make the one we just received the head. */
- if (prev) {
- head = prev->next;
- fp = skb_clone(head, GFP_ATOMIC);
-
- if (!fp)
- goto out_oom;
-
- fp->next = head->next;
- if (!fp->next)
- fq->q.fragments_tail = fp;
- prev->next = fp;
-
- skb_morph(head, fq->q.fragments);
- head->next = fq->q.fragments->next;
-
- consume_skb(fq->q.fragments);
- fq->q.fragments = head;
- }
-
- WARN_ON(head == NULL);
- WARN_ON(head->ip_defrag_offset != 0);
+ reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
+ if (!reasm_data)
+ goto out_oom;
- /* Unfragmented part is taken from the first segment. */
- payload_len = ((head->data - skb_network_header(head)) -
+ payload_len = ((skb->data - skb_network_header(skb)) -
sizeof(struct ipv6hdr) + fq->q.len -
sizeof(struct frag_hdr));
if (payload_len > IPV6_MAXPLEN)
goto out_oversize;
- delta = - head->truesize;
-
- /* Head of list must not be cloned. */
- if (skb_unclone(head, GFP_ATOMIC))
- goto out_oom;
-
- delta += head->truesize;
- if (delta)
- add_frag_mem_limit(fq->q.net, delta);
-
- /* If the first fragment is fragmented itself, we split
- * it to two chunks: the first with data and paged part
- * and the second, holding only fragments. */
- if (skb_has_frag_list(head)) {
- struct sk_buff *clone;
- int i, plen = 0;
-
- clone = alloc_skb(0, GFP_ATOMIC);
- if (!clone)
- goto out_oom;
- clone->next = head->next;
- head->next = clone;
- skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
- skb_frag_list_init(head);
- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
- plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
- clone->len = clone->data_len = head->data_len - plen;
- head->data_len -= clone->len;
- head->len -= clone->len;
- clone->csum = 0;
- clone->ip_summed = head->ip_summed;
- add_frag_mem_limit(fq->q.net, clone->truesize);
- }
-
/* We have to remove fragment header from datagram and to relocate
* header in order to calculate ICV correctly. */
nhoff = fq->nhoffset;
- skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
- memmove(head->head + sizeof(struct frag_hdr), head->head,
- (head->data - head->head) - sizeof(struct frag_hdr));
- if (skb_mac_header_was_set(head))
- head->mac_header += sizeof(struct frag_hdr);
- head->network_header += sizeof(struct frag_hdr);
-
- skb_reset_transport_header(head);
- skb_push(head, head->data - skb_network_header(head));
-
- sum_truesize = head->truesize;
- for (fp = head->next; fp;) {
- bool headstolen;
- int delta;
- struct sk_buff *next = fp->next;
-
- sum_truesize += fp->truesize;
- if (head->ip_summed != fp->ip_summed)
- head->ip_summed = CHECKSUM_NONE;
- else if (head->ip_summed == CHECKSUM_COMPLETE)
- head->csum = csum_add(head->csum, fp->csum);
-
- if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
- kfree_skb_partial(fp, headstolen);
- } else {
- fp->sk = NULL;
- if (!skb_shinfo(head)->frag_list)
- skb_shinfo(head)->frag_list = fp;
- head->data_len += fp->len;
- head->len += fp->len;
- head->truesize += fp->truesize;
- }
- fp = next;
- }
- sub_frag_mem_limit(fq->q.net, sum_truesize);
+ skb_network_header(skb)[nhoff] = skb_transport_header(skb)[0];
+ memmove(skb->head + sizeof(struct frag_hdr), skb->head,
+ (skb->data - skb->head) - sizeof(struct frag_hdr));
+ if (skb_mac_header_was_set(skb))
+ skb->mac_header += sizeof(struct frag_hdr);
+ skb->network_header += sizeof(struct frag_hdr);
+
+ skb_reset_transport_header(skb);
+
+ inet_frag_reasm_finish(&fq->q, skb, reasm_data);
- skb_mark_not_on_list(head);
- head->dev = dev;
- head->tstamp = fq->q.stamp;
- ipv6_hdr(head)->payload_len = htons(payload_len);
- ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
- IP6CB(head)->nhoff = nhoff;
- IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
- IP6CB(head)->frag_max_size = fq->q.max_size;
+ skb->dev = dev;
+ ipv6_hdr(skb)->payload_len = htons(payload_len);
+ ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
+ IP6CB(skb)->nhoff = nhoff;
+ IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
+ IP6CB(skb)->frag_max_size = fq->q.max_size;
/* Yes, and fold redundant checksum back. 8) */
- skb_postpush_rcsum(head, skb_network_header(head),
- skb_network_header_len(head));
+ skb_postpush_rcsum(skb, skb_network_header(skb),
+ skb_network_header_len(skb));
rcu_read_lock();
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
@@ -414,6 +307,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
fq->q.fragments = NULL;
fq->q.rb_fragments = RB_ROOT;
fq->q.fragments_tail = NULL;
+ fq->q.last_run_head = NULL;
return 1;
out_oversize:
@@ -464,10 +358,6 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
return 1;
}
- if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
- fhdr->frag_off & htons(IP6_MF))
- goto fail_hdr;
-
iif = skb->dev ? skb->dev->ifindex : 0;
fq = fq_find(net, fhdr->identification, hdr, iif);
if (fq) {
@@ -485,6 +375,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
if (prob_offset) {
__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
IPSTATS_MIB_INHDRERRORS);
+ /* icmpv6_param_prob() calls kfree_skb(skb) */
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, prob_offset);
}
return ret;
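For reference, the insert_error paths above distinguish the result codes of
inet_frag_queue_insert(), which at this point in the series are (per
include/net/inet_frag.h):

    /* Result codes of inet_frag_queue_insert(), as handled above. */
    enum {
    	IPFRAG_OK,	/* fragment inserted into the queue's rbtree */
    	IPFRAG_DUP,	/* exact duplicate: drop the skb, keep the queue */
    	IPFRAG_OVERLAP,	/* RFC 5722 overlap: the whole queue is killed */
    };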
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 964491cf3672..dc066fdf7e46 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -4812,6 +4812,73 @@ int rt6_dump_route(struct fib6_info *rt, void *p_arg)
arg->cb->nlh->nlmsg_seq, flags);
}
+static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct rtmsg *rtm;
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid header for get route request");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_ipv6_policy, extack);
+
+ rtm = nlmsg_data(nlh);
+ if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
+ (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
+ rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
+ rtm->rtm_type) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
+ return -EINVAL;
+ }
+ if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid flags for get route request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_ipv6_policy, extack);
+ if (err)
+ return err;
+
+ if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
+ (tb[RTA_DST] && !rtm->rtm_dst_len)) {
+ NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
+ return -EINVAL;
+ }
+
+ for (i = 0; i <= RTA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case RTA_SRC:
+ case RTA_DST:
+ case RTA_IIF:
+ case RTA_OIF:
+ case RTA_MARK:
+ case RTA_UID:
+ case RTA_SPORT:
+ case RTA_DPORT:
+ case RTA_IP_PROTO:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -4826,8 +4893,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct flowi6 fl6 = {};
bool fibmatch;
- err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
- extack);
+ err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
if (err < 0)
goto errout;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index b81eb7cb815e..e51cda79f0cc 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1864,7 +1864,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
refcount_read(&sp->sk_refcnt), sp,
jiffies_to_clock_t(icsk->icsk_rto),
jiffies_to_clock_t(icsk->icsk_ack.ato),
- (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
+ (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
tp->snd_cwnd,
state == TCP_LISTEN ?
fastopenq->max_qlen :
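The raw icsk_ack.pingpong read in the /proc output is routed through an accessor so later
work can redefine what "in pingpong mode" means without touching every call site. The
helpers presumably look like this (names from the diff, bodies assumed; an exit helper
exists as well):

    /* Sketch of the pingpong accessors this series switches callers to. */
    static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
    {
    	inet_csk(sk)->icsk_ack.pingpong = 1;
    }

    static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
    {
    	return inet_csk(sk)->icsk_ack.pingpong;
    }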
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 7d55d4c04088..2662a23c658e 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1209,21 +1209,57 @@ static const struct nla_policy devconf_mpls_policy[NETCONFA_MAX + 1] = {
[NETCONFA_IFINDEX] = { .len = sizeof(int) },
};
+static int mpls_netconf_valid_get_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid header for netconf get request");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(struct netconfmsg), tb,
+ NETCONFA_MAX, devconf_mpls_policy, extack);
+
+ err = nlmsg_parse_strict(nlh, sizeof(struct netconfmsg), tb,
+ NETCONFA_MAX, devconf_mpls_policy, extack);
+ if (err)
+ return err;
+
+ for (i = 0; i <= NETCONFA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case NETCONFA_IFINDEX:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int mpls_netconf_get_devconf(struct sk_buff *in_skb,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(in_skb->sk);
struct nlattr *tb[NETCONFA_MAX + 1];
- struct netconfmsg *ncm;
struct net_device *dev;
struct mpls_dev *mdev;
struct sk_buff *skb;
int ifindex;
int err;
- err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
- devconf_mpls_policy, extack);
+ err = mpls_netconf_valid_get_req(in_skb, nlh, tb, extack);
if (err < 0)
goto errout;
@@ -2236,6 +2272,64 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_MPLS_ROUTE, err);
}
+static int mpls_valid_getroute_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct rtmsg *rtm;
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid header for get route request");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_mpls_policy, extack);
+
+ rtm = nlmsg_data(nlh);
+ if ((rtm->rtm_dst_len && rtm->rtm_dst_len != 20) ||
+ rtm->rtm_src_len || rtm->rtm_tos || rtm->rtm_table ||
+ rtm->rtm_protocol || rtm->rtm_scope || rtm->rtm_type) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
+ return -EINVAL;
+ }
+ if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid flags for get route request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_mpls_policy, extack);
+ if (err)
+ return err;
+
+ if ((tb[RTA_DST] || tb[RTA_NEWDST]) && !rtm->rtm_dst_len) {
+ NL_SET_ERR_MSG_MOD(extack, "rtm_dst_len must be 20 for MPLS");
+ return -EINVAL;
+ }
+
+ for (i = 0; i <= RTA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case RTA_DST:
+ case RTA_NEWDST:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
struct netlink_ext_ack *extack)
{
@@ -2255,8 +2349,7 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
u8 n_labels;
int err;
- err = nlmsg_parse(in_nlh, sizeof(*rtm), tb, RTA_MAX,
- rtm_mpls_policy, extack);
+ err = mpls_valid_getroute_req(in_skb, in_nlh, tb, extack);
if (err < 0)
goto errout;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index beb3a69ce1d4..fefd63a243f2 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -174,7 +174,7 @@ config NF_CT_PROTO_DCCP
If unsure, say Y.
config NF_CT_PROTO_GRE
- tristate
+ bool
config NF_CT_PROTO_SCTP
bool 'SCTP protocol connection tracking support'
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 1ae65a314d7a..e66067befa42 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -13,6 +13,7 @@ nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_LABELS) += nf_conntrack_labels.o
nf_conntrack-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o
nf_conntrack-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
+nf_conntrack-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o
obj-$(CONFIG_NETFILTER) = netfilter.o
@@ -25,8 +26,6 @@ obj-$(CONFIG_NETFILTER_NETLINK_OSF) += nfnetlink_osf.o
# connection tracking
obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o
-obj-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o
-
# netlink interface for nf_conntrack
obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o
obj-$(CONFIG_NF_CT_NETLINK_TIMEOUT) += nfnetlink_cttimeout.o
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index fe9abf3cc10a..e969dad66991 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -53,6 +53,7 @@
#endif
#include <net/ip_vs.h>
+#include <linux/indirect_call_wrapper.h>
EXPORT_SYMBOL(register_ip_vs_scheduler);
@@ -70,6 +71,29 @@ EXPORT_SYMBOL(ip_vs_get_debug_level);
#endif
EXPORT_SYMBOL(ip_vs_new_conn_out);
+#ifdef CONFIG_IP_VS_PROTO_TCP
+INDIRECT_CALLABLE_DECLARE(int
+ tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, struct ip_vs_iphdr *iph));
+#endif
+
+#ifdef CONFIG_IP_VS_PROTO_UDP
+INDIRECT_CALLABLE_DECLARE(int
+ udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, struct ip_vs_iphdr *iph));
+#endif
+
+#if defined(CONFIG_IP_VS_PROTO_TCP) && defined(CONFIG_IP_VS_PROTO_UDP)
+#define SNAT_CALL(f, ...) \
+ INDIRECT_CALL_2(f, tcp_snat_handler, udp_snat_handler, __VA_ARGS__)
+#elif defined(CONFIG_IP_VS_PROTO_TCP)
+#define SNAT_CALL(f, ...) INDIRECT_CALL_1(f, tcp_snat_handler, __VA_ARGS__)
+#elif defined(CONFIG_IP_VS_PROTO_UDP)
+#define SNAT_CALL(f, ...) INDIRECT_CALL_1(f, udp_snat_handler, __VA_ARGS__)
+#else
+#define SNAT_CALL(f, ...) f(__VA_ARGS__)
+#endif
+
static unsigned int ip_vs_net_id __read_mostly;
/* netns cnt used for uniqueness */
static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
@@ -478,7 +502,9 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
*/
if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK)) {
iph->hdr_flags ^= IP_VS_HDR_INVERSE;
- cp = pp->conn_in_get(svc->ipvs, svc->af, skb, iph);
+ cp = INDIRECT_CALL_1(pp->conn_in_get,
+ ip_vs_conn_in_get_proto, svc->ipvs,
+ svc->af, skb, iph);
iph->hdr_flags ^= IP_VS_HDR_INVERSE;
if (cp) {
@@ -972,7 +998,8 @@ static int ip_vs_out_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb,
ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, true, &ciph);
/* The embedded headers contain source and dest in reverse order */
- cp = pp->conn_out_get(ipvs, AF_INET, skb, &ciph);
+ cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto,
+ ipvs, AF_INET, skb, &ciph);
if (!cp)
return NF_ACCEPT;
@@ -1028,7 +1055,8 @@ static int ip_vs_out_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
return NF_ACCEPT;
/* The embedded headers contain source and dest in reverse order */
- cp = pp->conn_out_get(ipvs, AF_INET6, skb, &ciph);
+ cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto,
+ ipvs, AF_INET6, skb, &ciph);
if (!cp)
return NF_ACCEPT;
@@ -1263,7 +1291,8 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
goto drop;
/* mangle the packet */
- if (pp->snat_handler && !pp->snat_handler(skb, pp, cp, iph))
+ if (pp->snat_handler &&
+ !SNAT_CALL(pp->snat_handler, skb, pp, cp, iph))
goto drop;
#ifdef CONFIG_IP_VS_IPV6
@@ -1389,7 +1418,8 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
/*
* Check if the packet belongs to an existing entry
*/
- cp = pp->conn_out_get(ipvs, af, skb, &iph);
+ cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto,
+ ipvs, af, skb, &iph);
if (likely(cp)) {
if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
@@ -1644,7 +1674,8 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
/* The embedded headers contain source and dest in reverse order.
* For IPIP this is error for request, not for reply.
*/
- cp = pp->conn_in_get(ipvs, AF_INET, skb, &ciph);
+ cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
+ ipvs, AF_INET, skb, &ciph);
if (!cp) {
int v;
@@ -1796,7 +1827,8 @@ static int ip_vs_in_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
/* The embedded headers contain source and dest in reverse order
* if not from localhost
*/
- cp = pp->conn_in_get(ipvs, AF_INET6, skb, &ciph);
+ cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
+ ipvs, AF_INET6, skb, &ciph);
if (!cp) {
int v;
@@ -1925,7 +1957,8 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
/*
* Check if the packet belongs to an existing connection entry
*/
- cp = pp->conn_in_get(ipvs, af, skb, &iph);
+ cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
+ ipvs, af, skb, &iph);
conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
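SNAT_CALL() and the conn_{in,out}_get call sites above build on
<linux/indirect_call_wrapper.h>: under CONFIG_RETPOLINE an indirect call through a
function pointer costs a retpoline thunk, so the macros compare the pointer against its
likeliest target(s) and call directly on a match. The core of the header is essentially:

    /* Core of <linux/indirect_call_wrapper.h> under CONFIG_RETPOLINE
     * (shown for reference): if the pointer matches a known likely
     * target, emit a plain direct call instead of going indirect. */
    #define INDIRECT_CALL_1(f, f1, ...)					\
    	({								\
    		likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__);	\
    	})
    #define INDIRECT_CALL_2(f, f2, f1, ...)				\
    	({								\
    		likely(f == f2) ? f2(__VA_ARGS__) :			\
    				  INDIRECT_CALL_1(f, f1, __VA_ARGS__);	\
    	})

Without CONFIG_RETPOLINE both macros collapse to the plain indirect call, so the
annotation costs nothing where the mitigation is off.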
diff --git a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
index 5320d39976e1..480598cb0f05 100644
--- a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
@@ -129,7 +129,6 @@ struct ip_vs_protocol ip_vs_protocol_ah = {
.conn_out_get = ah_esp_conn_out_get,
.snat_handler = NULL,
.dnat_handler = NULL,
- .csum_check = NULL,
.state_transition = NULL,
.register_app = NULL,
.unregister_app = NULL,
@@ -152,7 +151,6 @@ struct ip_vs_protocol ip_vs_protocol_esp = {
.conn_out_get = ah_esp_conn_out_get,
.snat_handler = NULL,
.dnat_handler = NULL,
- .csum_check = NULL,
.state_transition = NULL,
.register_app = NULL,
.unregister_app = NULL,
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index b0cd7d08f2a7..bc3d1625ecc8 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -10,6 +10,9 @@
#include <net/ip_vs.h>
static int
+sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp);
+
+static int
sctp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp,
@@ -105,7 +108,7 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
int ret;
/* Some checks before mangling */
- if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
+ if (!sctp_csum_check(cp->af, skb, pp))
return 0;
/* Call application helper if needed */
@@ -152,7 +155,7 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
int ret;
/* Some checks before mangling */
- if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
+ if (!sctp_csum_check(cp->af, skb, pp))
return 0;
/* Call application helper if needed */
@@ -587,7 +590,6 @@ struct ip_vs_protocol ip_vs_protocol_sctp = {
.conn_out_get = ip_vs_conn_out_get_proto,
.snat_handler = sctp_snat_handler,
.dnat_handler = sctp_dnat_handler,
- .csum_check = sctp_csum_check,
.state_name = sctp_state_name,
.state_transition = sctp_state_transition,
.app_conn_bind = sctp_app_conn_bind,
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index 1770fc6ce960..479419759983 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -28,10 +28,14 @@
#include <net/ip6_checksum.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
+#include <linux/indirect_call_wrapper.h>
#include <net/ip_vs.h>
static int
+tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp);
+
+static int
tcp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp,
@@ -143,7 +147,7 @@ tcp_partial_csum_update(int af, struct tcphdr *tcph,
}
-static int
+INDIRECT_CALLABLE_SCOPE int
tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
@@ -166,7 +170,7 @@ tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
int ret;
/* Some checks before mangling */
- if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
+ if (!tcp_csum_check(cp->af, skb, pp))
return 0;
/* Call application helper if needed */
@@ -192,7 +196,7 @@ tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
tcp_fast_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
cp->dport, cp->vport);
if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->ip_summed = (cp->app && pp->csum_check) ?
+ skb->ip_summed = cp->app ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
} else {
/* full checksum calculation */
@@ -244,7 +248,7 @@ tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
int ret;
/* Some checks before mangling */
- if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
+ if (!tcp_csum_check(cp->af, skb, pp))
return 0;
/*
@@ -275,7 +279,7 @@ tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
tcp_fast_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr,
cp->vport, cp->dport);
if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->ip_summed = (cp->app && pp->csum_check) ?
+ skb->ip_summed = cp->app ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
} else {
/* full checksum calculation */
@@ -736,7 +740,6 @@ struct ip_vs_protocol ip_vs_protocol_tcp = {
.conn_out_get = ip_vs_conn_out_get_proto,
.snat_handler = tcp_snat_handler,
.dnat_handler = tcp_dnat_handler,
- .csum_check = tcp_csum_check,
.state_name = tcp_state_name,
.state_transition = tcp_state_transition,
.app_conn_bind = tcp_app_conn_bind,
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index 0f53c49025f8..646c384910fb 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -23,12 +23,16 @@
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/udp.h>
+#include <linux/indirect_call_wrapper.h>
#include <net/ip_vs.h>
#include <net/ip.h>
#include <net/ip6_checksum.h>
static int
+udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp);
+
+static int
udp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp,
@@ -133,7 +137,7 @@ udp_partial_csum_update(int af, struct udphdr *uhdr,
}
-static int
+INDIRECT_CALLABLE_SCOPE int
udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
@@ -156,7 +160,7 @@ udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
int ret;
/* Some checks before mangling */
- if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
+ if (!udp_csum_check(cp->af, skb, pp))
return 0;
/*
@@ -186,7 +190,7 @@ udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
udp_fast_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
cp->dport, cp->vport);
if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->ip_summed = (cp->app && pp->csum_check) ?
+ skb->ip_summed = cp->app ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
} else {
/* full checksum calculation */
@@ -239,7 +243,7 @@ udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
int ret;
/* Some checks before mangling */
- if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
+ if (!udp_csum_check(cp->af, skb, pp))
return 0;
/*
@@ -270,7 +274,7 @@ udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
udp_fast_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr,
cp->vport, cp->dport);
if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->ip_summed = (cp->app && pp->csum_check) ?
+ skb->ip_summed = cp->app ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
} else {
/* full checksum calculation */
@@ -494,7 +498,6 @@ struct ip_vs_protocol ip_vs_protocol_udp = {
.conn_out_get = ip_vs_conn_out_get_proto,
.snat_handler = udp_snat_handler,
.dnat_handler = udp_dnat_handler,
- .csum_check = udp_csum_check,
.state_transition = udp_state_transition,
.state_name = udp_state_name,
.register_app = udp_register_app,
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 741b533148ba..815956ac5a76 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -222,6 +222,24 @@ static u32 hash_conntrack(const struct net *net,
return scale_hash(hash_conntrack_raw(tuple, net));
}
+static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct nf_conntrack_tuple *tuple)
+{
+ struct {
+ __be16 sport;
+ __be16 dport;
+ } _inet_hdr, *inet_hdr;
+
+ /* Actually only need first 4 bytes to get ports. */
+ inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
+ if (!inet_hdr)
+ return false;
+
+ tuple->src.u.udp.port = inet_hdr->sport;
+ tuple->dst.u.udp.port = inet_hdr->dport;
+ return true;
+}
+
static bool
nf_ct_get_tuple(const struct sk_buff *skb,
unsigned int nhoff,
@@ -229,16 +247,11 @@ nf_ct_get_tuple(const struct sk_buff *skb,
u_int16_t l3num,
u_int8_t protonum,
struct net *net,
- struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_l4proto *l4proto)
+ struct nf_conntrack_tuple *tuple)
{
unsigned int size;
const __be32 *ap;
__be32 _addrs[8];
- struct {
- __be16 sport;
- __be16 dport;
- } _inet_hdr, *inet_hdr;
memset(tuple, 0, sizeof(*tuple));
@@ -274,16 +287,36 @@ nf_ct_get_tuple(const struct sk_buff *skb,
tuple->dst.protonum = protonum;
tuple->dst.dir = IP_CT_DIR_ORIGINAL;
- if (unlikely(l4proto->pkt_to_tuple))
- return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);
-
- /* Actually only need first 4 bytes to get ports. */
- inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
- if (!inet_hdr)
- return false;
+ switch (protonum) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPPROTO_ICMPV6:
+ return icmpv6_pkt_to_tuple(skb, dataoff, net, tuple);
+#endif
+ case IPPROTO_ICMP:
+ return icmp_pkt_to_tuple(skb, dataoff, net, tuple);
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ case IPPROTO_GRE:
+ return gre_pkt_to_tuple(skb, dataoff, net, tuple);
+#endif
+ case IPPROTO_TCP:
+ case IPPROTO_UDP: /* fallthrough */
+ return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+ case IPPROTO_UDPLITE:
+ return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ case IPPROTO_SCTP:
+ return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ case IPPROTO_DCCP:
+ return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#endif
+ default:
+ break;
+ }
- tuple->src.u.udp.port = inet_hdr->sport;
- tuple->dst.u.udp.port = inet_hdr->dport;
return true;
}
@@ -366,33 +399,20 @@ bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
u_int16_t l3num,
struct net *net, struct nf_conntrack_tuple *tuple)
{
- const struct nf_conntrack_l4proto *l4proto;
u8 protonum;
int protoff;
- int ret;
-
- rcu_read_lock();
protoff = get_l4proto(skb, nhoff, l3num, &protonum);
- if (protoff <= 0) {
- rcu_read_unlock();
+ if (protoff <= 0)
return false;
- }
-
- l4proto = __nf_ct_l4proto_find(protonum);
- ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
- l4proto);
-
- rcu_read_unlock();
- return ret;
+ return nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
- const struct nf_conntrack_tuple *orig,
- const struct nf_conntrack_l4proto *l4proto)
+ const struct nf_conntrack_tuple *orig)
{
memset(inverse, 0, sizeof(*inverse));
@@ -415,8 +435,14 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
inverse->dst.protonum = orig->dst.protonum;
- if (unlikely(l4proto->invert_tuple))
- return l4proto->invert_tuple(inverse, orig);
+ switch (orig->dst.protonum) {
+ case IPPROTO_ICMP:
+ return nf_conntrack_invert_icmp_tuple(inverse, orig);
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPPROTO_ICMPV6:
+ return nf_conntrack_invert_icmpv6_tuple(inverse, orig);
+#endif
+ }
inverse->src.u.all = orig->dst.u.all;
inverse->dst.u.all = orig->src.u.all;
@@ -526,11 +552,20 @@ void nf_ct_tmpl_free(struct nf_conn *tmpl)
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
+static void destroy_gre_conntrack(struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ struct nf_conn *master = ct->master;
+
+ if (master)
+ nf_ct_gre_keymap_destroy(master);
+#endif
+}
+
static void
destroy_conntrack(struct nf_conntrack *nfct)
{
struct nf_conn *ct = (struct nf_conn *)nfct;
- const struct nf_conntrack_l4proto *l4proto;
pr_debug("destroy_conntrack(%p)\n", ct);
WARN_ON(atomic_read(&nfct->use) != 0);
@@ -539,9 +574,9 @@ destroy_conntrack(struct nf_conntrack *nfct)
nf_ct_tmpl_free(ct);
return;
}
- l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
- if (l4proto->destroy)
- l4proto->destroy(ct);
+
+ if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE))
+ destroy_gre_conntrack(ct);
local_bh_disable();
/* Expectations will have been removed in clean_from_lists,
@@ -840,7 +875,7 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
enum ip_conntrack_info oldinfo;
struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);
- l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
+ l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
if (l4proto->allow_clash &&
!nf_ct_is_dying(ct) &&
atomic_inc_not_zero(&ct->ct_general.use)) {
@@ -1112,7 +1147,7 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct)
if (!test_bit(IPS_ASSURED_BIT, &ct->status))
return true;
- l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
+ l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
return true;
@@ -1342,7 +1377,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_free);
static noinline struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_l4proto *l4proto,
struct sk_buff *skb,
unsigned int dataoff, u32 hash)
{
@@ -1355,7 +1389,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
struct nf_conn_timeout *timeout_ext;
struct nf_conntrack_zone tmp;
- if (!nf_ct_invert_tuple(&repl_tuple, tuple, l4proto)) {
+ if (!nf_ct_invert_tuple(&repl_tuple, tuple)) {
pr_debug("Can't invert tuple.\n");
return NULL;
}
@@ -1437,7 +1471,6 @@ resolve_normal_ct(struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
u_int8_t protonum,
- const struct nf_conntrack_l4proto *l4proto,
const struct nf_hook_state *state)
{
const struct nf_conntrack_zone *zone;
@@ -1450,7 +1483,7 @@ resolve_normal_ct(struct nf_conn *tmpl,
if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
dataoff, state->pf, protonum, state->net,
- &tuple, l4proto)) {
+ &tuple)) {
pr_debug("Can't get tuple\n");
return 0;
}
@@ -1460,7 +1493,7 @@ resolve_normal_ct(struct nf_conn *tmpl,
hash = hash_conntrack_raw(&tuple, state->net);
h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
if (!h) {
- h = init_conntrack(state->net, tmpl, &tuple, l4proto,
+ h = init_conntrack(state->net, tmpl, &tuple,
skb, dataoff, hash);
if (!h)
return 0;
@@ -1522,10 +1555,66 @@ nf_conntrack_handle_icmp(struct nf_conn *tmpl,
return ret;
}
+static int generic_packet(struct nf_conn *ct, struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo)
+{
+ const unsigned int *timeout = nf_ct_timeout_lookup(ct);
+
+ if (!timeout)
+ timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;
+
+ nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
+ return NF_ACCEPT;
+}
+
+/* Returns verdict for packet, or -1 for invalid. */
+static int nf_conntrack_handle_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
+{
+ switch (nf_ct_protonum(ct)) {
+ case IPPROTO_TCP:
+ return nf_conntrack_tcp_packet(ct, skb, dataoff,
+ ctinfo, state);
+ case IPPROTO_UDP:
+ return nf_conntrack_udp_packet(ct, skb, dataoff,
+ ctinfo, state);
+ case IPPROTO_ICMP:
+ return nf_conntrack_icmp_packet(ct, skb, ctinfo, state);
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPPROTO_ICMPV6:
+ return nf_conntrack_icmpv6_packet(ct, skb, ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+ case IPPROTO_UDPLITE:
+ return nf_conntrack_udplite_packet(ct, skb, dataoff,
+ ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ case IPPROTO_SCTP:
+ return nf_conntrack_sctp_packet(ct, skb, dataoff,
+ ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ case IPPROTO_DCCP:
+ return nf_conntrack_dccp_packet(ct, skb, dataoff,
+ ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ case IPPROTO_GRE:
+ return nf_conntrack_gre_packet(ct, skb, dataoff,
+ ctinfo, state);
+#endif
+ }
+
+ return generic_packet(ct, skb, ctinfo);
+}
+
unsigned int
nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
{
- const struct nf_conntrack_l4proto *l4proto;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct, *tmpl;
u_int8_t protonum;
@@ -1552,8 +1641,6 @@ nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
goto out;
}
- l4proto = __nf_ct_l4proto_find(protonum);
-
if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) {
ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff,
protonum, state);
@@ -1567,7 +1654,7 @@ nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
}
repeat:
ret = resolve_normal_ct(tmpl, skb, dataoff,
- protonum, l4proto, state);
+ protonum, state);
if (ret < 0) {
/* Too stressed to deal. */
NF_CT_STAT_INC_ATOMIC(state->net, drop);
@@ -1583,7 +1670,7 @@ repeat:
goto out;
}
- ret = l4proto->packet(ct, skb, dataoff, ctinfo, state);
+ ret = nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state);
if (ret <= 0) {
/* Invalid: inverse of the return code tells
* the netfilter core what to do */
@@ -1614,19 +1701,6 @@ out:
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);
-bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
- const struct nf_conntrack_tuple *orig)
-{
- bool ret;
-
- rcu_read_lock();
- ret = nf_ct_invert_tuple(inverse, orig,
- __nf_ct_l4proto_find(orig->dst.protonum));
- rcu_read_unlock();
- return ret;
-}
-EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
-
/* Alter reply tuple (maybe alter helper). This is for NAT, and is
implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
@@ -1757,7 +1831,6 @@ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
{
- const struct nf_conntrack_l4proto *l4proto;
struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_tuple tuple;
enum ip_conntrack_info ctinfo;
@@ -1778,10 +1851,8 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
if (dataoff <= 0)
return -1;
- l4proto = nf_ct_l4proto_find_get(l4num);
-
if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
- l4num, net, &tuple, l4proto))
+ l4num, net, &tuple))
return -1;
if (ct->status & IPS_SRC_NAT) {
@@ -2413,15 +2484,10 @@ int nf_conntrack_init_net(struct net *net)
nf_conntrack_tstamp_pernet_init(net);
nf_conntrack_ecache_pernet_init(net);
nf_conntrack_helper_pernet_init(net);
+ nf_conntrack_proto_pernet_init(net);
- ret = nf_conntrack_proto_pernet_init(net);
- if (ret < 0)
- goto err_proto;
return 0;
-err_proto:
- nf_conntrack_ecache_pernet_fini(net);
- nf_conntrack_expect_pernet_fini(net);
err_expect:
free_percpu(net->ct.stat);
err_pcpu_lists:
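
The hunks above remove the last per-packet uses of the l4proto lookup:
nf_ct_get_tuple(), nf_ct_invert_tuple() and the new
nf_conntrack_handle_packet() dispatch with a switch on the protocol
number rather than an indirect call through an RCU-protected handler
array. A minimal user-space sketch of that shape change; every name,
number and verdict below is an illustrative stand-in, not kernel code:

/* Build: cc -o dispatch dispatch.c */
#include <stdio.h>

#define PROTO_TCP  6
#define PROTO_UDP 17

static int tcp_packet(int len) { return len > 20; }  /* toy verdicts */
static int udp_packet(int len) { return len > 8;  }
static int generic_packet(int len) { (void)len; return 1; }

/* before: indirect call through a registration table (the kernel also
 * needed rcu_read_lock() around this) */
typedef int (*packet_fn)(int len);
static packet_fn handlers[256] = {
	[PROTO_TCP] = tcp_packet,
	[PROTO_UDP] = udp_packet,
};

static int handle_indirect(int proto, int len)
{
	packet_fn fn = handlers[proto] ? handlers[proto] : generic_packet;

	return fn(len);
}

/* after: direct dispatch; no table, no registration, no RCU read lock */
static int handle_direct(int proto, int len)
{
	switch (proto) {
	case PROTO_TCP: return tcp_packet(len);
	case PROTO_UDP: return udp_packet(len);
	}
	return generic_packet(len);
}

int main(void)
{
	printf("indirect=%d direct=%d\n",
	       handle_indirect(PROTO_TCP, 40), handle_direct(PROTO_TCP, 40));
	return 0;
}
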
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 3034038bfdf0..334d6e5b7762 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -610,7 +610,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
expect->tuple.src.l3num,
expect->tuple.dst.protonum);
print_tuple(s, &expect->tuple,
- __nf_ct_l4proto_find(expect->tuple.dst.protonum));
+ nf_ct_l4proto_find(expect->tuple.dst.protonum));
if (expect->flags & NF_CT_EXPECT_PERMANENT) {
seq_puts(s, "PERMANENT");
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 1213beb5a714..8071bb04a849 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -134,7 +134,7 @@ static int ctnetlink_dump_tuples(struct sk_buff *skb,
ret = ctnetlink_dump_tuples_ip(skb, tuple);
if (ret >= 0) {
- l4proto = __nf_ct_l4proto_find(tuple->dst.protonum);
+ l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
}
rcu_read_unlock();
@@ -182,7 +182,7 @@ static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
struct nlattr *nest_proto;
int ret;
- l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
+ l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
if (!l4proto->to_nlattr)
return 0;
@@ -590,7 +590,7 @@ static size_t ctnetlink_proto_size(const struct nf_conn *ct)
len = nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1);
len *= 3u; /* ORIG, REPLY, MASTER */
- l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
+ l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
len += l4proto->nlattr_size;
if (l4proto->nlattr_tuple_size) {
len4 = l4proto->nlattr_tuple_size();
@@ -1059,7 +1059,7 @@ static int ctnetlink_parse_tuple_proto(struct nlattr *attr,
tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
rcu_read_lock();
- l4proto = __nf_ct_l4proto_find(tuple->dst.protonum);
+ l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
if (likely(l4proto->nlattr_to_tuple)) {
ret = nla_validate_nested(attr, CTA_PROTO_MAX,
@@ -1722,11 +1722,9 @@ static int ctnetlink_change_protoinfo(struct nf_conn *ct,
if (err < 0)
return err;
- rcu_read_lock();
- l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
+ l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
if (l4proto->from_nlattr)
err = l4proto->from_nlattr(tb, ct);
- rcu_read_unlock();
return err;
}
@@ -2676,7 +2674,7 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
rcu_read_lock();
ret = ctnetlink_dump_tuples_ip(skb, &m);
if (ret >= 0) {
- l4proto = __nf_ct_l4proto_find(tuple->dst.protonum);
+ l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
}
rcu_read_unlock();
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 11562f2a08bb..976f1dcb97f0 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -121,7 +121,7 @@ static void pptp_expectfn(struct nf_conn *ct,
struct nf_conntrack_expect *exp_other;
/* obviously this tuple inversion only works until you do NAT */
- nf_ct_invert_tuplepr(&inv_t, &exp->tuple);
+ nf_ct_invert_tuple(&inv_t, &exp->tuple);
pr_debug("trying to unexpect other dir: ");
nf_ct_dump_tuple(&inv_t);
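
With nf_ct_invert_tuplepr() gone, callers like the PPTP helper above use
nf_ct_invert_tuple() directly; inversion is a plain src/dst swap except
for the ICMP/ICMPv6 cases special-cased earlier in this patch. A toy
model of the swap, with an invented tuple layout:

/* Build: cc -o invert invert.c */
#include <stdio.h>
#include <string.h>

struct tuple {				/* illustrative, not the kernel's */
	unsigned int src_ip, dst_ip;
	unsigned short src_port, dst_port;
	unsigned char proto;
};

static void invert_tuple(struct tuple *inv, const struct tuple *orig)
{
	memset(inv, 0, sizeof(*inv));
	inv->src_ip   = orig->dst_ip;	/* reply source is original dest */
	inv->dst_ip   = orig->src_ip;
	inv->src_port = orig->dst_port;
	inv->dst_port = orig->src_port;
	inv->proto    = orig->proto;
}

int main(void)
{
	struct tuple orig = { 0x0a000001, 0x0a000002, 12345, 80, 6 }, inv;

	invert_tuple(&inv, &orig);
	printf("reply: %#x:%u -> %#x:%u\n",
	       inv.src_ip, inv.src_port, inv.dst_ip, inv.dst_port);
	return 0;
}
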
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 859f5d07a915..b9403a266a2e 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -43,40 +43,9 @@
extern unsigned int nf_conntrack_net_id;
-static struct nf_conntrack_l4proto __rcu *nf_ct_protos[MAX_NF_CT_PROTO + 1] __read_mostly;
-
static DEFINE_MUTEX(nf_ct_proto_mutex);
#ifdef CONFIG_SYSCTL
-static int
-nf_ct_register_sysctl(struct net *net,
- struct ctl_table_header **header,
- const char *path,
- struct ctl_table *table)
-{
- if (*header == NULL) {
- *header = register_net_sysctl(net, path, table);
- if (*header == NULL)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void
-nf_ct_unregister_sysctl(struct ctl_table_header **header,
- struct ctl_table **table,
- unsigned int users)
-{
- if (users > 0)
- return;
-
- unregister_net_sysctl_table(*header);
- kfree(*table);
- *header = NULL;
- *table = NULL;
-}
-
__printf(5, 6)
void nf_l4proto_log_invalid(const struct sk_buff *skb,
struct net *net,
@@ -124,295 +93,82 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
EXPORT_SYMBOL_GPL(nf_ct_l4proto_log_invalid);
#endif
-const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u8 l4proto)
-{
- if (unlikely(l4proto >= ARRAY_SIZE(nf_ct_protos)))
- return &nf_conntrack_l4proto_generic;
-
- return rcu_dereference(nf_ct_protos[l4proto]);
-}
-EXPORT_SYMBOL_GPL(__nf_ct_l4proto_find);
-
-const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u8 l4num)
-{
- const struct nf_conntrack_l4proto *p;
-
- rcu_read_lock();
- p = __nf_ct_l4proto_find(l4num);
- if (!try_module_get(p->me))
- p = &nf_conntrack_l4proto_generic;
- rcu_read_unlock();
-
- return p;
-}
-EXPORT_SYMBOL_GPL(nf_ct_l4proto_find_get);
-
-void nf_ct_l4proto_put(const struct nf_conntrack_l4proto *p)
-{
- module_put(p->me);
-}
-EXPORT_SYMBOL_GPL(nf_ct_l4proto_put);
-
-static int kill_l4proto(struct nf_conn *i, void *data)
-{
- const struct nf_conntrack_l4proto *l4proto;
- l4proto = data;
- return nf_ct_protonum(i) == l4proto->l4proto;
-}
-
-static struct nf_proto_net *nf_ct_l4proto_net(struct net *net,
- const struct nf_conntrack_l4proto *l4proto)
-{
- if (l4proto->get_net_proto) {
- /* statically built-in protocols use static per-net */
- return l4proto->get_net_proto(net);
- } else if (l4proto->net_id) {
- /* ... and loadable protocols use dynamic per-net */
- return net_generic(net, *l4proto->net_id);
- }
- return NULL;
-}
-
-static
-int nf_ct_l4proto_register_sysctl(struct net *net,
- struct nf_proto_net *pn)
-{
- int err = 0;
-
-#ifdef CONFIG_SYSCTL
- if (pn->ctl_table != NULL) {
- err = nf_ct_register_sysctl(net,
- &pn->ctl_table_header,
- "net/netfilter",
- pn->ctl_table);
- if (err < 0) {
- if (!pn->users) {
- kfree(pn->ctl_table);
- pn->ctl_table = NULL;
- }
- }
- }
-#endif /* CONFIG_SYSCTL */
- return err;
-}
-
-static
-void nf_ct_l4proto_unregister_sysctl(struct nf_proto_net *pn)
-{
-#ifdef CONFIG_SYSCTL
- if (pn->ctl_table_header != NULL)
- nf_ct_unregister_sysctl(&pn->ctl_table_header,
- &pn->ctl_table,
- pn->users);
-#endif /* CONFIG_SYSCTL */
-}
-
-/* FIXME: Allow NULL functions and sub in pointers to generic for
- them. --RR */
-int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *l4proto)
-{
- int ret = 0;
-
- if ((l4proto->to_nlattr && l4proto->nlattr_size == 0) ||
- (l4proto->tuple_to_nlattr && !l4proto->nlattr_tuple_size))
- return -EINVAL;
-
- mutex_lock(&nf_ct_proto_mutex);
- if (rcu_dereference_protected(
- nf_ct_protos[l4proto->l4proto],
- lockdep_is_held(&nf_ct_proto_mutex)
- ) != &nf_conntrack_l4proto_generic) {
- ret = -EBUSY;
- goto out_unlock;
- }
-
- rcu_assign_pointer(nf_ct_protos[l4proto->l4proto], l4proto);
-out_unlock:
- mutex_unlock(&nf_ct_proto_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(nf_ct_l4proto_register_one);
-
-int nf_ct_l4proto_pernet_register_one(struct net *net,
- const struct nf_conntrack_l4proto *l4proto)
-{
- int ret = 0;
- struct nf_proto_net *pn = NULL;
-
- if (l4proto->init_net) {
- ret = l4proto->init_net(net);
- if (ret < 0)
- goto out;
- }
-
- pn = nf_ct_l4proto_net(net, l4proto);
- if (pn == NULL)
- goto out;
-
- ret = nf_ct_l4proto_register_sysctl(net, pn);
- if (ret < 0)
- goto out;
-
- pn->users++;
-out:
- return ret;
-}
-EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register_one);
-
-static void __nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *l4proto)
-
-{
- BUG_ON(l4proto->l4proto >= ARRAY_SIZE(nf_ct_protos));
-
- BUG_ON(rcu_dereference_protected(
- nf_ct_protos[l4proto->l4proto],
- lockdep_is_held(&nf_ct_proto_mutex)
- ) != l4proto);
- rcu_assign_pointer(nf_ct_protos[l4proto->l4proto],
- &nf_conntrack_l4proto_generic);
-}
-
-void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *l4proto)
-{
- mutex_lock(&nf_ct_proto_mutex);
- __nf_ct_l4proto_unregister_one(l4proto);
- mutex_unlock(&nf_ct_proto_mutex);
-
- synchronize_net();
- /* Remove all conntrack entries for this protocol */
- nf_ct_iterate_destroy(kill_l4proto, (void *)l4proto);
-}
-EXPORT_SYMBOL_GPL(nf_ct_l4proto_unregister_one);
-
-void nf_ct_l4proto_pernet_unregister_one(struct net *net,
- const struct nf_conntrack_l4proto *l4proto)
-{
- struct nf_proto_net *pn = nf_ct_l4proto_net(net, l4proto);
-
- if (pn == NULL)
- return;
-
- pn->users--;
- nf_ct_l4proto_unregister_sysctl(pn);
-}
-EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister_one);
-
-static void
-nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const l4proto[],
- unsigned int num_proto)
-{
- int i;
-
- mutex_lock(&nf_ct_proto_mutex);
- for (i = 0; i < num_proto; i++)
- __nf_ct_l4proto_unregister_one(l4proto[i]);
- mutex_unlock(&nf_ct_proto_mutex);
-
- synchronize_net();
-
- for (i = 0; i < num_proto; i++)
- nf_ct_iterate_destroy(kill_l4proto, (void *)l4proto[i]);
-}
-
-static int
-nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const l4proto[],
- unsigned int num_proto)
-{
- int ret = -EINVAL;
- unsigned int i;
-
- for (i = 0; i < num_proto; i++) {
- ret = nf_ct_l4proto_register_one(l4proto[i]);
- if (ret < 0)
- break;
- }
- if (i != num_proto) {
- pr_err("nf_conntrack: can't register l4 %d proto.\n",
- l4proto[i]->l4proto);
- nf_ct_l4proto_unregister(l4proto, i);
- }
- return ret;
-}
-
-int nf_ct_l4proto_pernet_register(struct net *net,
- const struct nf_conntrack_l4proto *const l4proto[],
- unsigned int num_proto)
+const struct nf_conntrack_l4proto *nf_ct_l4proto_find(u8 l4proto)
{
- int ret = -EINVAL;
- unsigned int i;
-
- for (i = 0; i < num_proto; i++) {
- ret = nf_ct_l4proto_pernet_register_one(net, l4proto[i]);
- if (ret < 0)
- break;
- }
- if (i != num_proto) {
- pr_err("nf_conntrack %d: pernet registration failed\n",
- l4proto[i]->l4proto);
- nf_ct_l4proto_pernet_unregister(net, l4proto, i);
+ switch (l4proto) {
+ case IPPROTO_UDP: return &nf_conntrack_l4proto_udp;
+ case IPPROTO_TCP: return &nf_conntrack_l4proto_tcp;
+ case IPPROTO_ICMP: return &nf_conntrack_l4proto_icmp;
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ case IPPROTO_DCCP: return &nf_conntrack_l4proto_dccp;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ case IPPROTO_SCTP: return &nf_conntrack_l4proto_sctp;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+ case IPPROTO_UDPLITE: return &nf_conntrack_l4proto_udplite;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ case IPPROTO_GRE: return &nf_conntrack_l4proto_gre;
+#endif
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPPROTO_ICMPV6: return &nf_conntrack_l4proto_icmpv6;
+#endif /* CONFIG_IPV6 */
}
- return ret;
-}
-EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register);
-void nf_ct_l4proto_pernet_unregister(struct net *net,
- const struct nf_conntrack_l4proto *const l4proto[],
- unsigned int num_proto)
-{
- while (num_proto-- != 0)
- nf_ct_l4proto_pernet_unregister_one(net, l4proto[num_proto]);
-}
-EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister);
+ return &nf_conntrack_l4proto_generic;
+}
+EXPORT_SYMBOL_GPL(nf_ct_l4proto_find);
-static unsigned int ipv4_helper(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
+static unsigned int nf_confirm(struct sk_buff *skb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
{
- struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
const struct nf_conn_help *help;
- const struct nf_conntrack_helper *helper;
-
- /* This is where we call the helper: as the packet goes out. */
- ct = nf_ct_get(skb, &ctinfo);
- if (!ct || ctinfo == IP_CT_RELATED_REPLY)
- return NF_ACCEPT;
help = nfct_help(ct);
- if (!help)
- return NF_ACCEPT;
+ if (help) {
+ const struct nf_conntrack_helper *helper;
+ int ret;
+
+ /* rcu_read_lock()ed by nf_hook_thresh */
+ helper = rcu_dereference(help->helper);
+ if (helper) {
+ ret = helper->help(skb,
+ protoff,
+ ct, ctinfo);
+ if (ret != NF_ACCEPT)
+ return ret;
+ }
+ }
- /* rcu_read_lock()ed by nf_hook_thresh */
- helper = rcu_dereference(help->helper);
- if (!helper)
- return NF_ACCEPT;
+ if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
+ !nf_is_loopback_packet(skb)) {
+ if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
+ NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
+ return NF_DROP;
+ }
+ }
- return helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb),
- ct, ctinfo);
+ /* We've seen it coming out the other side: confirm it */
+ return nf_conntrack_confirm(skb);
}
static unsigned int ipv4_confirm(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
- struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
if (!ct || ctinfo == IP_CT_RELATED_REPLY)
- goto out;
+ return nf_conntrack_confirm(skb);
- /* adjust seqs for loopback traffic only in outgoing direction */
- if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
- !nf_is_loopback_packet(skb)) {
- if (!nf_ct_seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) {
- NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
- return NF_DROP;
- }
- }
-out:
- /* We've seen it coming out the other side: confirm it */
- return nf_conntrack_confirm(skb);
+ return nf_confirm(skb,
+ skb_network_offset(skb) + ip_hdrlen(skb),
+ ct, ctinfo);
}
static unsigned int ipv4_conntrack_in(void *priv,
@@ -461,24 +217,12 @@ static const struct nf_hook_ops ipv4_conntrack_ops[] = {
.priority = NF_IP_PRI_CONNTRACK,
},
{
- .hook = ipv4_helper,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_POST_ROUTING,
- .priority = NF_IP_PRI_CONNTRACK_HELPER,
- },
- {
.hook = ipv4_confirm,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM,
},
{
- .hook = ipv4_helper,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_LOCAL_IN,
- .priority = NF_IP_PRI_CONNTRACK_HELPER,
- },
- {
.hook = ipv4_confirm,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
@@ -623,31 +367,21 @@ static unsigned int ipv6_confirm(void *priv,
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
unsigned char pnum = ipv6_hdr(skb)->nexthdr;
- int protoff;
__be16 frag_off;
+ int protoff;
ct = nf_ct_get(skb, &ctinfo);
if (!ct || ctinfo == IP_CT_RELATED_REPLY)
- goto out;
+ return nf_conntrack_confirm(skb);
protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
&frag_off);
if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
pr_debug("proto header not found\n");
- goto out;
+ return nf_conntrack_confirm(skb);
}
- /* adjust seqs for loopback traffic only in outgoing direction */
- if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
- !nf_is_loopback_packet(skb)) {
- if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
- NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
- return NF_DROP;
- }
- }
-out:
- /* We've seen it coming out the other side: confirm it */
- return nf_conntrack_confirm(skb);
+ return nf_confirm(skb, protoff, ct, ctinfo);
}
static unsigned int ipv6_conntrack_in(void *priv,
@@ -664,42 +398,6 @@ static unsigned int ipv6_conntrack_local(void *priv,
return nf_conntrack_in(skb, state);
}
-static unsigned int ipv6_helper(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- struct nf_conn *ct;
- const struct nf_conn_help *help;
- const struct nf_conntrack_helper *helper;
- enum ip_conntrack_info ctinfo;
- __be16 frag_off;
- int protoff;
- u8 nexthdr;
-
- /* This is where we call the helper: as the packet goes out. */
- ct = nf_ct_get(skb, &ctinfo);
- if (!ct || ctinfo == IP_CT_RELATED_REPLY)
- return NF_ACCEPT;
-
- help = nfct_help(ct);
- if (!help)
- return NF_ACCEPT;
- /* rcu_read_lock()ed by nf_hook_thresh */
- helper = rcu_dereference(help->helper);
- if (!helper)
- return NF_ACCEPT;
-
- nexthdr = ipv6_hdr(skb)->nexthdr;
- protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
- &frag_off);
- if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
- pr_debug("proto header not found\n");
- return NF_ACCEPT;
- }
-
- return helper->help(skb, protoff, ct, ctinfo);
-}
-
static const struct nf_hook_ops ipv6_conntrack_ops[] = {
{
.hook = ipv6_conntrack_in,
@@ -714,24 +412,12 @@ static const struct nf_hook_ops ipv6_conntrack_ops[] = {
.priority = NF_IP6_PRI_CONNTRACK,
},
{
- .hook = ipv6_helper,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_POST_ROUTING,
- .priority = NF_IP6_PRI_CONNTRACK_HELPER,
- },
- {
.hook = ipv6_confirm,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP6_PRI_LAST,
},
{
- .hook = ipv6_helper,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_LOCAL_IN,
- .priority = NF_IP6_PRI_CONNTRACK_HELPER,
- },
- {
.hook = ipv6_confirm,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
@@ -874,27 +560,9 @@ void nf_ct_netns_put(struct net *net, uint8_t nfproto)
}
EXPORT_SYMBOL_GPL(nf_ct_netns_put);
-static const struct nf_conntrack_l4proto * const builtin_l4proto[] = {
- &nf_conntrack_l4proto_tcp,
- &nf_conntrack_l4proto_udp,
- &nf_conntrack_l4proto_icmp,
-#ifdef CONFIG_NF_CT_PROTO_DCCP
- &nf_conntrack_l4proto_dccp,
-#endif
-#ifdef CONFIG_NF_CT_PROTO_SCTP
- &nf_conntrack_l4proto_sctp,
-#endif
-#ifdef CONFIG_NF_CT_PROTO_UDPLITE
- &nf_conntrack_l4proto_udplite,
-#endif
-#if IS_ENABLED(CONFIG_IPV6)
- &nf_conntrack_l4proto_icmpv6,
-#endif /* CONFIG_IPV6 */
-};
-
int nf_conntrack_proto_init(void)
{
- int ret = 0, i;
+ int ret;
ret = nf_register_sockopt(&so_getorigdst);
if (ret < 0)
@@ -906,18 +574,8 @@ int nf_conntrack_proto_init(void)
goto cleanup_sockopt;
#endif
- for (i = 0; i < ARRAY_SIZE(nf_ct_protos); i++)
- RCU_INIT_POINTER(nf_ct_protos[i],
- &nf_conntrack_l4proto_generic);
-
- ret = nf_ct_l4proto_register(builtin_l4proto,
- ARRAY_SIZE(builtin_l4proto));
- if (ret < 0)
- goto cleanup_sockopt2;
-
return ret;
-cleanup_sockopt2:
- nf_unregister_sockopt(&so_getorigdst);
+
#if IS_ENABLED(CONFIG_IPV6)
cleanup_sockopt:
nf_unregister_sockopt(&so_getorigdst6);
@@ -933,43 +591,33 @@ void nf_conntrack_proto_fini(void)
#endif
}
-int nf_conntrack_proto_pernet_init(struct net *net)
+void nf_conntrack_proto_pernet_init(struct net *net)
{
- int err;
- struct nf_proto_net *pn = nf_ct_l4proto_net(net,
- &nf_conntrack_l4proto_generic);
-
- err = nf_conntrack_l4proto_generic.init_net(net);
- if (err < 0)
- return err;
- err = nf_ct_l4proto_register_sysctl(net,
- pn);
- if (err < 0)
- return err;
-
- err = nf_ct_l4proto_pernet_register(net, builtin_l4proto,
- ARRAY_SIZE(builtin_l4proto));
- if (err < 0) {
- nf_ct_l4proto_unregister_sysctl(pn);
- return err;
- }
-
- pn->users++;
- return 0;
+ nf_conntrack_generic_init_net(net);
+ nf_conntrack_udp_init_net(net);
+ nf_conntrack_tcp_init_net(net);
+ nf_conntrack_icmp_init_net(net);
+#if IS_ENABLED(CONFIG_IPV6)
+ nf_conntrack_icmpv6_init_net(net);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ nf_conntrack_dccp_init_net(net);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ nf_conntrack_sctp_init_net(net);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ nf_conntrack_gre_init_net(net);
+#endif
}
void nf_conntrack_proto_pernet_fini(struct net *net)
{
- struct nf_proto_net *pn = nf_ct_l4proto_net(net,
- &nf_conntrack_l4proto_generic);
-
- nf_ct_l4proto_pernet_unregister(net, builtin_l4proto,
- ARRAY_SIZE(builtin_l4proto));
- pn->users--;
- nf_ct_l4proto_unregister_sysctl(pn);
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ nf_ct_gre_keymap_flush(net);
+#endif
}
-
module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
&nf_conntrack_htable_size, 0600);
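
With the separate helper hooks deleted above, helper invocation,
sequence-number adjustment and confirmation run back to back in one
nf_confirm() call; the IPv4 and IPv6 entry points differ only in how the
transport header offset is computed. A stand-alone sketch of that
control flow, with stubs in place of the kernel calls:

/* Build: cc -o confirm confirm.c */
#include <stdio.h>

enum verdict { V_DROP, V_ACCEPT };

static enum verdict run_helper(int protoff) { (void)protoff; return V_ACCEPT; }
static enum verdict seq_adjust(int protoff) { (void)protoff; return V_ACCEPT; }
static enum verdict confirm_entry(void)     { return V_ACCEPT; }

static enum verdict nf_confirm_sketch(int protoff)
{
	if (run_helper(protoff) != V_ACCEPT)	/* helper verdict wins */
		return V_DROP;
	if (seq_adjust(protoff) != V_ACCEPT)	/* NAT seq fixup */
		return V_DROP;
	return confirm_entry();	/* seen both directions: confirm it */
}

static enum verdict ipv4_confirm_sketch(int ip_hdrlen)
{
	return nf_confirm_sketch(ip_hdrlen);	/* fixed-offset IPv4 header */
}

static enum verdict ipv6_confirm_sketch(int exthdr_end)
{
	return nf_confirm_sketch(exthdr_end);	/* after extension headers */
}

int main(void)
{
	printf("%d %d\n", ipv4_confirm_sketch(20), ipv6_confirm_sketch(40));
	return 0;
}
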
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 023c1445bc39..6fca80587505 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -472,9 +472,10 @@ out_invalid:
return true;
}
-static int dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
- unsigned int dataoff, enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state)
+int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
struct dccp_hdr _dh, *dh;
@@ -723,123 +724,28 @@ dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = {
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-/* template, data assigned later */
-static struct ctl_table dccp_sysctl_table[] = {
- {
- .procname = "nf_conntrack_dccp_timeout_request",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_dccp_timeout_respond",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_dccp_timeout_partopen",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_dccp_timeout_open",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_dccp_timeout_closereq",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_dccp_timeout_closing",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_dccp_timeout_timewait",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_dccp_loose",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- { }
-};
-#endif /* CONFIG_SYSCTL */
-
-static int dccp_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *pn,
- struct nf_dccp_net *dn)
-{
-#ifdef CONFIG_SYSCTL
- if (pn->ctl_table)
- return 0;
-
- pn->ctl_table = kmemdup(dccp_sysctl_table,
- sizeof(dccp_sysctl_table),
- GFP_KERNEL);
- if (!pn->ctl_table)
- return -ENOMEM;
-
- pn->ctl_table[0].data = &dn->dccp_timeout[CT_DCCP_REQUEST];
- pn->ctl_table[1].data = &dn->dccp_timeout[CT_DCCP_RESPOND];
- pn->ctl_table[2].data = &dn->dccp_timeout[CT_DCCP_PARTOPEN];
- pn->ctl_table[3].data = &dn->dccp_timeout[CT_DCCP_OPEN];
- pn->ctl_table[4].data = &dn->dccp_timeout[CT_DCCP_CLOSEREQ];
- pn->ctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING];
- pn->ctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT];
- pn->ctl_table[7].data = &dn->dccp_loose;
-
- /* Don't export sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns)
- pn->ctl_table[0].procname = NULL;
-#endif
- return 0;
-}
-
-static int dccp_init_net(struct net *net)
+void nf_conntrack_dccp_init_net(struct net *net)
{
struct nf_dccp_net *dn = nf_dccp_pernet(net);
- struct nf_proto_net *pn = &dn->pn;
-
- if (!pn->users) {
- /* default values */
- dn->dccp_loose = 1;
- dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL;
- dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL;
- dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL;
- dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ;
- dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ;
- dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
- dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
-
- /* timeouts[0] is unused, make it same as SYN_SENT so
- * ->timeouts[0] contains 'new' timeout, like udp or icmp.
- */
- dn->dccp_timeout[CT_DCCP_NONE] = dn->dccp_timeout[CT_DCCP_REQUEST];
- }
- return dccp_kmemdup_sysctl_table(net, pn, dn);
-}
-
-static struct nf_proto_net *dccp_get_net_proto(struct net *net)
-{
- return &net->ct.nf_ct_proto.dccp.pn;
+ /* default values */
+ dn->dccp_loose = 1;
+ dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL;
+ dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL;
+ dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL;
+ dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ;
+ dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ;
+ dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
+ dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
+
+	/* timeouts[0] is unused; make it the same as the REQUEST timeout
+	 * so ->timeouts[0] contains the 'new' timeout, like udp or icmp.
+	 */
+ dn->dccp_timeout[CT_DCCP_NONE] = dn->dccp_timeout[CT_DCCP_REQUEST];
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp = {
.l4proto = IPPROTO_DCCP,
- .packet = dccp_packet,
.can_early_drop = dccp_can_early_drop,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = dccp_print_conntrack,
@@ -862,6 +768,4 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp = {
.nla_policy = dccp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = dccp_init_net,
- .get_net_proto = dccp_get_net_proto,
};
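
The DCCP conversion above sets the pattern repeated in the remaining
protocol files: the kmemdup'd per-netns sysctl table is gone and init
reduces to filling in default timeouts, with slot 0 aliased to the 'new'
timeout. A compilable sketch of that shape, with invented state names
and timeout values:

/* Build: cc -o timeouts timeouts.c */
#include <stdio.h>

enum { ST_NONE, ST_REQUEST, ST_RESPOND, ST_OPEN, ST_MAX };

struct dccp_net_sketch {
	unsigned int timeout[ST_MAX];	/* seconds, illustrative values */
	int loose;
};

static void dccp_init_net_sketch(struct dccp_net_sketch *dn)
{
	dn->loose = 1;
	dn->timeout[ST_REQUEST] = 2 * 60;
	dn->timeout[ST_RESPOND] = 4 * 60;
	dn->timeout[ST_OPEN]    = 12 * 3600;
	/* slot 0 is unused; alias it to the 'new' timeout like udp/icmp */
	dn->timeout[ST_NONE] = dn->timeout[ST_REQUEST];
}

int main(void)
{
	struct dccp_net_sketch dn;

	dccp_init_net_sketch(&dn);
	printf("new=%u open=%u\n", dn.timeout[ST_NONE], dn.timeout[ST_OPEN]);
	return 0;
}
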
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 5da19d5fbc76..0f526fafecae 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -15,50 +15,6 @@
static const unsigned int nf_ct_generic_timeout = 600*HZ;
-static bool nf_generic_should_process(u8 proto)
-{
- switch (proto) {
-#ifdef CONFIG_NF_CT_PROTO_GRE_MODULE
- case IPPROTO_GRE:
- return false;
-#endif
- default:
- return true;
- }
-}
-
-static bool generic_pkt_to_tuple(const struct sk_buff *skb,
- unsigned int dataoff,
- struct net *net, struct nf_conntrack_tuple *tuple)
-{
- tuple->src.u.all = 0;
- tuple->dst.u.all = 0;
-
- return true;
-}
-
-/* Returns verdict for packet, or -1 for invalid. */
-static int generic_packet(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state)
-{
- const unsigned int *timeout = nf_ct_timeout_lookup(ct);
-
- if (!nf_generic_should_process(nf_ct_protonum(ct))) {
- pr_warn_once("conntrack: generic helper won't handle protocol %d. Please consider loading the specific helper module.\n",
- nf_ct_protonum(ct));
- return -NF_ACCEPT;
- }
-
- if (!timeout)
- timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;
-
- nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
- return NF_ACCEPT;
-}
-
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
@@ -104,53 +60,16 @@ generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-static struct ctl_table generic_sysctl_table[] = {
- {
- .procname = "nf_conntrack_generic_timeout",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- { }
-};
-#endif /* CONFIG_SYSCTL */
-
-static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn,
- struct nf_generic_net *gn)
-{
-#ifdef CONFIG_SYSCTL
- pn->ctl_table = kmemdup(generic_sysctl_table,
- sizeof(generic_sysctl_table),
- GFP_KERNEL);
- if (!pn->ctl_table)
- return -ENOMEM;
-
- pn->ctl_table[0].data = &gn->timeout;
-#endif
- return 0;
-}
-
-static int generic_init_net(struct net *net)
+void nf_conntrack_generic_init_net(struct net *net)
{
struct nf_generic_net *gn = nf_generic_pernet(net);
- struct nf_proto_net *pn = &gn->pn;
gn->timeout = nf_ct_generic_timeout;
-
- return generic_kmemdup_sysctl_table(pn, gn);
-}
-
-static struct nf_proto_net *generic_get_net_proto(struct net *net)
-{
- return &net->ct.nf_ct_proto.generic.pn;
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
{
.l4proto = 255,
- .pkt_to_tuple = generic_pkt_to_tuple,
- .packet = generic_packet,
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = generic_timeout_nlattr_to_obj,
@@ -160,6 +79,4 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
.nla_policy = generic_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = generic_init_net,
- .get_net_proto = generic_get_net_proto,
};
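
generic_packet() itself moved to nf_conntrack_core.c (see the earlier
hunk) but keeps its lookup order: a timeout attached to the individual
conntrack entry wins, otherwise the per-netns default applies. The same
fallback in isolation, with hypothetical names:

/* Build: cc -o fallback fallback.c */
#include <stdio.h>

struct conn_sketch {
	const unsigned int *timeout_ext; /* NULL unless explicitly attached */
};

static unsigned int effective_timeout(const struct conn_sketch *ct,
				      unsigned int pernet_default)
{
	const unsigned int *t = ct->timeout_ext;

	return t ? *t : pernet_default;	/* per-entry override, else default */
}

int main(void)
{
	unsigned int override = 30;
	struct conn_sketch plain = { 0 }, tuned = { &override };

	printf("%u %u\n", effective_timeout(&plain, 600),
	       effective_timeout(&tuned, 600));
	return 0;
}
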
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 8899b51aad44..ee9ab10a32e4 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -48,24 +48,25 @@ static const unsigned int gre_timeouts[GRE_CT_MAX] = {
[GRE_CT_REPLIED] = 180*HZ,
};
-static unsigned int proto_gre_net_id __read_mostly;
+/* used when expectation is added */
+static DEFINE_SPINLOCK(keymap_lock);
-static inline struct netns_proto_gre *gre_pernet(struct net *net)
+static inline struct nf_gre_net *gre_pernet(struct net *net)
{
- return net_generic(net, proto_gre_net_id);
+ return &net->ct.nf_ct_proto.gre;
}
-static void nf_ct_gre_keymap_flush(struct net *net)
+void nf_ct_gre_keymap_flush(struct net *net)
{
- struct netns_proto_gre *net_gre = gre_pernet(net);
+ struct nf_gre_net *net_gre = gre_pernet(net);
struct nf_ct_gre_keymap *km, *tmp;
- write_lock_bh(&net_gre->keymap_lock);
+ spin_lock_bh(&keymap_lock);
list_for_each_entry_safe(km, tmp, &net_gre->keymap_list, list) {
- list_del(&km->list);
- kfree(km);
+ list_del_rcu(&km->list);
+ kfree_rcu(km, rcu);
}
- write_unlock_bh(&net_gre->keymap_lock);
+ spin_unlock_bh(&keymap_lock);
}
static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km,
@@ -81,18 +82,16 @@ static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km,
/* look up the source key for a given tuple */
static __be16 gre_keymap_lookup(struct net *net, struct nf_conntrack_tuple *t)
{
- struct netns_proto_gre *net_gre = gre_pernet(net);
+ struct nf_gre_net *net_gre = gre_pernet(net);
struct nf_ct_gre_keymap *km;
__be16 key = 0;
- read_lock_bh(&net_gre->keymap_lock);
- list_for_each_entry(km, &net_gre->keymap_list, list) {
+ list_for_each_entry_rcu(km, &net_gre->keymap_list, list) {
if (gre_key_cmpfn(km, t)) {
key = km->tuple.src.u.gre.key;
break;
}
}
- read_unlock_bh(&net_gre->keymap_lock);
pr_debug("lookup src key 0x%x for ", key);
nf_ct_dump_tuple(t);
@@ -105,21 +104,17 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
struct nf_conntrack_tuple *t)
{
struct net *net = nf_ct_net(ct);
- struct netns_proto_gre *net_gre = gre_pernet(net);
+ struct nf_gre_net *net_gre = gre_pernet(net);
struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
struct nf_ct_gre_keymap **kmp, *km;
kmp = &ct_pptp_info->keymap[dir];
if (*kmp) {
/* check whether it's a retransmission */
- read_lock_bh(&net_gre->keymap_lock);
- list_for_each_entry(km, &net_gre->keymap_list, list) {
- if (gre_key_cmpfn(km, t) && km == *kmp) {
- read_unlock_bh(&net_gre->keymap_lock);
+ list_for_each_entry_rcu(km, &net_gre->keymap_list, list) {
+ if (gre_key_cmpfn(km, t) && km == *kmp)
return 0;
- }
}
- read_unlock_bh(&net_gre->keymap_lock);
pr_debug("trying to override keymap_%s for ct %p\n",
dir == IP_CT_DIR_REPLY ? "reply" : "orig", ct);
return -EEXIST;
@@ -134,9 +129,9 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
pr_debug("adding new entry %p: ", km);
nf_ct_dump_tuple(&km->tuple);
- write_lock_bh(&net_gre->keymap_lock);
+ spin_lock_bh(&keymap_lock);
list_add_tail(&km->list, &net_gre->keymap_list);
- write_unlock_bh(&net_gre->keymap_lock);
+ spin_unlock_bh(&keymap_lock);
return 0;
}
@@ -145,32 +140,30 @@ EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_add);
/* destroy the keymap entries associated with specified master ct */
void nf_ct_gre_keymap_destroy(struct nf_conn *ct)
{
- struct net *net = nf_ct_net(ct);
- struct netns_proto_gre *net_gre = gre_pernet(net);
struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
enum ip_conntrack_dir dir;
pr_debug("entering for ct %p\n", ct);
- write_lock_bh(&net_gre->keymap_lock);
+ spin_lock_bh(&keymap_lock);
for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++) {
if (ct_pptp_info->keymap[dir]) {
pr_debug("removing %p from list\n",
ct_pptp_info->keymap[dir]);
- list_del(&ct_pptp_info->keymap[dir]->list);
- kfree(ct_pptp_info->keymap[dir]);
+ list_del_rcu(&ct_pptp_info->keymap[dir]->list);
+ kfree_rcu(ct_pptp_info->keymap[dir], rcu);
ct_pptp_info->keymap[dir] = NULL;
}
}
- write_unlock_bh(&net_gre->keymap_lock);
+ spin_unlock_bh(&keymap_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_destroy);
/* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */
/* gre hdr info to tuple */
-static bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
- struct net *net, struct nf_conntrack_tuple *tuple)
+bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
+ struct net *net, struct nf_conntrack_tuple *tuple)
{
const struct pptp_gre_header *pgrehdr;
struct pptp_gre_header _pgrehdr;
@@ -216,15 +209,15 @@ static void gre_print_conntrack(struct seq_file *s, struct nf_conn *ct)
static unsigned int *gre_get_timeouts(struct net *net)
{
- return gre_pernet(net)->gre_timeouts;
+ return gre_pernet(net)->timeouts;
}
/* Returns verdict for packet, and may modify conntrack */
-static int gre_packet(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state)
+int nf_conntrack_gre_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
if (state->pf != NFPROTO_IPV4)
return -NF_ACCEPT;
@@ -256,19 +249,6 @@ static int gre_packet(struct nf_conn *ct,
return NF_ACCEPT;
}
-/* Called when a conntrack entry has already been removed from the hashes
- * and is about to be deleted from memory */
-static void gre_destroy(struct nf_conn *ct)
-{
- struct nf_conn *master = ct->master;
- pr_debug(" entering\n");
-
- if (!master)
- pr_debug("no master !?!\n");
- else
- nf_ct_gre_keymap_destroy(master);
-}
-
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
@@ -278,13 +258,13 @@ static int gre_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeouts = data;
- struct netns_proto_gre *net_gre = gre_pernet(net);
+ struct nf_gre_net *net_gre = gre_pernet(net);
if (!timeouts)
timeouts = gre_get_timeouts(net);
/* set default timeouts for GRE. */
- timeouts[GRE_CT_UNREPLIED] = net_gre->gre_timeouts[GRE_CT_UNREPLIED];
- timeouts[GRE_CT_REPLIED] = net_gre->gre_timeouts[GRE_CT_REPLIED];
+ timeouts[GRE_CT_UNREPLIED] = net_gre->timeouts[GRE_CT_UNREPLIED];
+ timeouts[GRE_CT_REPLIED] = net_gre->timeouts[GRE_CT_REPLIED];
if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) {
timeouts[GRE_CT_UNREPLIED] =
@@ -320,69 +300,22 @@ gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-static struct ctl_table gre_sysctl_table[] = {
- {
- .procname = "nf_conntrack_gre_timeout",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_gre_timeout_stream",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {}
-};
-#endif
-
-static int gre_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *nf,
- struct netns_proto_gre *net_gre)
-{
-#ifdef CONFIG_SYSCTL
- int i;
-
- if (nf->ctl_table)
- return 0;
-
- nf->ctl_table = kmemdup(gre_sysctl_table,
- sizeof(gre_sysctl_table),
- GFP_KERNEL);
- if (!nf->ctl_table)
- return -ENOMEM;
-
- for (i = 0; i < GRE_CT_MAX; i++)
- nf->ctl_table[i].data = &net_gre->gre_timeouts[i];
-#endif
- return 0;
-}
-
-static int gre_init_net(struct net *net)
+void nf_conntrack_gre_init_net(struct net *net)
{
- struct netns_proto_gre *net_gre = gre_pernet(net);
- struct nf_proto_net *nf = &net_gre->nf;
+ struct nf_gre_net *net_gre = gre_pernet(net);
int i;
- rwlock_init(&net_gre->keymap_lock);
INIT_LIST_HEAD(&net_gre->keymap_list);
for (i = 0; i < GRE_CT_MAX; i++)
- net_gre->gre_timeouts[i] = gre_timeouts[i];
-
- return gre_kmemdup_sysctl_table(net, nf, net_gre);
+ net_gre->timeouts[i] = gre_timeouts[i];
}
/* protocol helper struct */
-static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre = {
.l4proto = IPPROTO_GRE,
- .pkt_to_tuple = gre_pkt_to_tuple,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = gre_print_conntrack,
#endif
- .packet = gre_packet,
- .destroy = gre_destroy,
- .me = THIS_MODULE,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
@@ -398,61 +331,4 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
.nla_policy = gre_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .net_id = &proto_gre_net_id,
- .init_net = gre_init_net,
};
-
-static int proto_gre_net_init(struct net *net)
-{
- int ret = 0;
-
- ret = nf_ct_l4proto_pernet_register_one(net,
- &nf_conntrack_l4proto_gre4);
- if (ret < 0)
- pr_err("nf_conntrack_gre4: pernet registration failed.\n");
- return ret;
-}
-
-static void proto_gre_net_exit(struct net *net)
-{
- nf_ct_l4proto_pernet_unregister_one(net, &nf_conntrack_l4proto_gre4);
- nf_ct_gre_keymap_flush(net);
-}
-
-static struct pernet_operations proto_gre_net_ops = {
- .init = proto_gre_net_init,
- .exit = proto_gre_net_exit,
- .id = &proto_gre_net_id,
- .size = sizeof(struct netns_proto_gre),
-};
-
-static int __init nf_ct_proto_gre_init(void)
-{
- int ret;
-
- BUILD_BUG_ON(offsetof(struct netns_proto_gre, nf) != 0);
-
- ret = register_pernet_subsys(&proto_gre_net_ops);
- if (ret < 0)
- goto out_pernet;
- ret = nf_ct_l4proto_register_one(&nf_conntrack_l4proto_gre4);
- if (ret < 0)
- goto out_gre4;
-
- return 0;
-out_gre4:
- unregister_pernet_subsys(&proto_gre_net_ops);
-out_pernet:
- return ret;
-}
-
-static void __exit nf_ct_proto_gre_fini(void)
-{
- nf_ct_l4proto_unregister_one(&nf_conntrack_l4proto_gre4);
- unregister_pernet_subsys(&proto_gre_net_ops);
-}
-
-module_init(nf_ct_proto_gre_init);
-module_exit(nf_ct_proto_gre_fini);
-
-MODULE_LICENSE("GPL");
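
The keymap conversion above trades a per-netns rwlock for a global
spinlock on the write side and RCU on the read side
(list_for_each_entry_rcu() plus kfree_rcu()). The portable C11 sketch
below shows only the core idea -- writers publish a node with a release
store, readers traverse without taking any lock -- and deliberately
omits node removal, because safe deferred reclamation is exactly the
problem kfree_rcu() solves in the real code:

/* Build: cc -pthread -o keymap keymap.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct keymap {
	unsigned short key;
	struct keymap *next;	/* immutable once published */
};

static _Atomic(struct keymap *) head;
static pthread_mutex_t write_lock = PTHREAD_MUTEX_INITIALIZER;

static void keymap_add(unsigned short key)
{
	struct keymap *km = malloc(sizeof(*km));

	if (!km)
		return;
	km->key = key;
	pthread_mutex_lock(&write_lock);	/* serializes writers only */
	km->next = atomic_load_explicit(&head, memory_order_relaxed);
	atomic_store_explicit(&head, km, memory_order_release);
	pthread_mutex_unlock(&write_lock);
}

static int keymap_lookup(unsigned short key)
{
	struct keymap *km = atomic_load_explicit(&head, memory_order_acquire);

	for (; km; km = km->next)	/* no lock on the read side */
		if (km->key == key)
			return 1;
	return 0;
}

int main(void)
{
	keymap_add(0x1234);
	keymap_add(0xbeef);
	printf("%d %d\n", keymap_lookup(0x1234), keymap_lookup(0xffff));
	return 0;
}
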
diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c
index de64d8a5fdfd..7df477996b16 100644
--- a/net/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/netfilter/nf_conntrack_proto_icmp.c
@@ -25,8 +25,8 @@
static const unsigned int nf_ct_icmp_timeout = 30*HZ;
-static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
- struct net *net, struct nf_conntrack_tuple *tuple)
+bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
+ struct net *net, struct nf_conntrack_tuple *tuple)
{
const struct icmphdr *hp;
struct icmphdr _hdr;
@@ -54,8 +54,8 @@ static const u_int8_t invmap[] = {
[ICMP_ADDRESSREPLY] = ICMP_ADDRESS + 1
};
-static bool icmp_invert_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_tuple *orig)
+bool nf_conntrack_invert_icmp_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *orig)
{
if (orig->dst.u.icmp.type >= sizeof(invmap) ||
!invmap[orig->dst.u.icmp.type])
@@ -68,11 +68,10 @@ static bool icmp_invert_tuple(struct nf_conntrack_tuple *tuple,
}
/* Returns verdict for packet, or -1 for invalid. */
-static int icmp_packet(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state)
+int nf_conntrack_icmp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
/* Do not immediately delete the connection after the first
successful reply to avoid excessive conntrackd traffic
@@ -110,7 +109,6 @@ icmp_error_message(struct nf_conn *tmpl, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nf_conntrack_tuple innertuple, origtuple;
- const struct nf_conntrack_l4proto *innerproto;
const struct nf_conntrack_tuple_hash *h;
const struct nf_conntrack_zone *zone;
enum ip_conntrack_info ctinfo;
@@ -128,12 +126,9 @@ icmp_error_message(struct nf_conn *tmpl, struct sk_buff *skb,
return -NF_ACCEPT;
}
- /* rcu_read_lock()ed by nf_hook_thresh */
- innerproto = __nf_ct_l4proto_find(origtuple.dst.protonum);
-
/* Ordinarily, we'd expect the inverted tupleproto, but it's
been preserved inside the ICMP. */
- if (!nf_ct_invert_tuple(&innertuple, &origtuple, innerproto)) {
+ if (!nf_ct_invert_tuple(&innertuple, &origtuple)) {
pr_debug("icmp_error_message: no match\n");
return -NF_ACCEPT;
}
@@ -303,56 +298,16 @@ icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = {
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-static struct ctl_table icmp_sysctl_table[] = {
- {
- .procname = "nf_conntrack_icmp_timeout",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- { }
-};
-#endif /* CONFIG_SYSCTL */
-
-static int icmp_kmemdup_sysctl_table(struct nf_proto_net *pn,
- struct nf_icmp_net *in)
-{
-#ifdef CONFIG_SYSCTL
- pn->ctl_table = kmemdup(icmp_sysctl_table,
- sizeof(icmp_sysctl_table),
- GFP_KERNEL);
- if (!pn->ctl_table)
- return -ENOMEM;
-
- pn->ctl_table[0].data = &in->timeout;
-#endif
- return 0;
-}
-
-static int icmp_init_net(struct net *net)
+void nf_conntrack_icmp_init_net(struct net *net)
{
struct nf_icmp_net *in = nf_icmp_pernet(net);
- struct nf_proto_net *pn = &in->pn;
in->timeout = nf_ct_icmp_timeout;
-
- return icmp_kmemdup_sysctl_table(pn, in);
-}
-
-static struct nf_proto_net *icmp_get_net_proto(struct net *net)
-{
- return &net->ct.nf_ct_proto.icmp.pn;
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
{
.l4proto = IPPROTO_ICMP,
- .pkt_to_tuple = icmp_pkt_to_tuple,
- .invert_tuple = icmp_invert_tuple,
- .packet = icmp_packet,
- .destroy = NULL,
- .me = NULL,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = icmp_tuple_to_nlattr,
.nlattr_tuple_size = icmp_nlattr_tuple_size,
@@ -368,6 +323,4 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
.nla_policy = icmp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = icmp_init_net,
- .get_net_proto = icmp_get_net_proto,
};
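
nf_conntrack_invert_icmp_tuple() keeps the long-standing invmap scheme:
the reply type is looked up in a small table stored off by one, so a
zero entry means "no inverse, reject". A short demonstration with the
echo/echo-reply pair only (the kernel table covers more types):

/* Build: cc -o invmap invmap.c */
#include <stdio.h>

#define ICMP_ECHO	8
#define ICMP_ECHOREPLY	0

static const unsigned char invmap[] = {
	[ICMP_ECHO]      = ICMP_ECHOREPLY + 1,	/* +1: 0 marks "no inverse" */
	[ICMP_ECHOREPLY] = ICMP_ECHO + 1,
};

static int invert_icmp_type(unsigned char type, unsigned char *inv)
{
	if (type >= sizeof(invmap) || !invmap[type])
		return 0;		/* not invertible */
	*inv = invmap[type] - 1;
	return 1;
}

int main(void)
{
	unsigned char inv;

	if (invert_icmp_type(ICMP_ECHO, &inv))
		printf("echo (%d) inverts to type %u\n", ICMP_ECHO, inv);
	return 0;
}
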
diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c
index a15eefb8e317..bec4a3211658 100644
--- a/net/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/netfilter/nf_conntrack_proto_icmpv6.c
@@ -30,10 +30,10 @@
static const unsigned int nf_ct_icmpv6_timeout = 30*HZ;
-static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
- unsigned int dataoff,
- struct net *net,
- struct nf_conntrack_tuple *tuple)
+bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct net *net,
+ struct nf_conntrack_tuple *tuple)
{
const struct icmp6hdr *hp;
struct icmp6hdr _hdr;
@@ -67,8 +67,8 @@ static const u_int8_t noct_valid_new[] = {
[ICMPV6_MLD2_REPORT - 130] = 1
};
-static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_tuple *orig)
+bool nf_conntrack_invert_icmpv6_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *orig)
{
int type = orig->dst.u.icmp.type - 128;
if (type < 0 || type >= sizeof(invmap) || !invmap[type])
@@ -86,11 +86,10 @@ static unsigned int *icmpv6_get_timeouts(struct net *net)
}
/* Returns verdict for packet, or -1 for invalid. */
-static int icmpv6_packet(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state)
+int nf_conntrack_icmpv6_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
unsigned int *timeout = nf_ct_timeout_lookup(ct);
static const u8 valid_new[] = {
@@ -131,7 +130,6 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
{
struct nf_conntrack_tuple intuple, origtuple;
const struct nf_conntrack_tuple_hash *h;
- const struct nf_conntrack_l4proto *inproto;
enum ip_conntrack_info ctinfo;
struct nf_conntrack_zone tmp;
@@ -147,12 +145,9 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
return -NF_ACCEPT;
}
- /* rcu_read_lock()ed by nf_hook_thresh */
- inproto = __nf_ct_l4proto_find(origtuple.dst.protonum);
-
/* Ordinarily, we'd expect the inverted tupleproto, but it's
been preserved inside the ICMP. */
- if (!nf_ct_invert_tuple(&intuple, &origtuple, inproto)) {
+ if (!nf_ct_invert_tuple(&intuple, &origtuple)) {
pr_debug("icmpv6_error: Can't invert tuple\n");
return -NF_ACCEPT;
}
@@ -314,54 +309,16 @@ icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = {
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-static struct ctl_table icmpv6_sysctl_table[] = {
- {
- .procname = "nf_conntrack_icmpv6_timeout",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- { }
-};
-#endif /* CONFIG_SYSCTL */
-
-static int icmpv6_kmemdup_sysctl_table(struct nf_proto_net *pn,
- struct nf_icmp_net *in)
-{
-#ifdef CONFIG_SYSCTL
- pn->ctl_table = kmemdup(icmpv6_sysctl_table,
- sizeof(icmpv6_sysctl_table),
- GFP_KERNEL);
- if (!pn->ctl_table)
- return -ENOMEM;
-
- pn->ctl_table[0].data = &in->timeout;
-#endif
- return 0;
-}
-
-static int icmpv6_init_net(struct net *net)
+void nf_conntrack_icmpv6_init_net(struct net *net)
{
struct nf_icmp_net *in = nf_icmpv6_pernet(net);
- struct nf_proto_net *pn = &in->pn;
in->timeout = nf_ct_icmpv6_timeout;
-
- return icmpv6_kmemdup_sysctl_table(pn, in);
-}
-
-static struct nf_proto_net *icmpv6_get_net_proto(struct net *net)
-{
- return &net->ct.nf_ct_proto.icmpv6.pn;
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
{
.l4proto = IPPROTO_ICMPV6,
- .pkt_to_tuple = icmpv6_pkt_to_tuple,
- .invert_tuple = icmpv6_invert_tuple,
- .packet = icmpv6_packet,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = icmpv6_tuple_to_nlattr,
.nlattr_tuple_size = icmpv6_nlattr_tuple_size,
@@ -377,6 +334,4 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
.nla_policy = icmpv6_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = icmpv6_init_net,
- .get_net_proto = icmpv6_get_net_proto,
};
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index d53e3e78f605..a7818101ad80 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -357,11 +357,11 @@ out_invalid:
}
/* Returns verdict for packet, or -NF_ACCEPT for invalid. */
-static int sctp_packet(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state)
+int nf_conntrack_sctp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
enum sctp_conntrack new_state, old_state;
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
@@ -642,116 +642,18 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-
-#ifdef CONFIG_SYSCTL
-static struct ctl_table sctp_sysctl_table[] = {
- {
- .procname = "nf_conntrack_sctp_timeout_closed",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_sctp_timeout_cookie_wait",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_sctp_timeout_cookie_echoed",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_sctp_timeout_established",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_sctp_timeout_shutdown_sent",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_sctp_timeout_shutdown_recd",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_sctp_timeout_shutdown_ack_sent",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_sctp_timeout_heartbeat_sent",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_sctp_timeout_heartbeat_acked",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- { }
-};
-#endif
-
-static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn,
- struct nf_sctp_net *sn)
-{
-#ifdef CONFIG_SYSCTL
- if (pn->ctl_table)
- return 0;
-
- pn->ctl_table = kmemdup(sctp_sysctl_table,
- sizeof(sctp_sysctl_table),
- GFP_KERNEL);
- if (!pn->ctl_table)
- return -ENOMEM;
-
- pn->ctl_table[0].data = &sn->timeouts[SCTP_CONNTRACK_CLOSED];
- pn->ctl_table[1].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_WAIT];
- pn->ctl_table[2].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_ECHOED];
- pn->ctl_table[3].data = &sn->timeouts[SCTP_CONNTRACK_ESTABLISHED];
- pn->ctl_table[4].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT];
- pn->ctl_table[5].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD];
- pn->ctl_table[6].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT];
- pn->ctl_table[7].data = &sn->timeouts[SCTP_CONNTRACK_HEARTBEAT_SENT];
- pn->ctl_table[8].data = &sn->timeouts[SCTP_CONNTRACK_HEARTBEAT_ACKED];
-#endif
- return 0;
-}
-
-static int sctp_init_net(struct net *net)
+void nf_conntrack_sctp_init_net(struct net *net)
{
struct nf_sctp_net *sn = nf_sctp_pernet(net);
- struct nf_proto_net *pn = &sn->pn;
-
- if (!pn->users) {
- int i;
-
- for (i = 0; i < SCTP_CONNTRACK_MAX; i++)
- sn->timeouts[i] = sctp_timeouts[i];
-
- /* timeouts[0] is unused, init it so ->timeouts[0] contains
- * 'new' timeout, like udp or icmp.
- */
- sn->timeouts[0] = sctp_timeouts[SCTP_CONNTRACK_CLOSED];
- }
+ int i;
- return sctp_kmemdup_sysctl_table(pn, sn);
-}
+ for (i = 0; i < SCTP_CONNTRACK_MAX; i++)
+ sn->timeouts[i] = sctp_timeouts[i];
-static struct nf_proto_net *sctp_get_net_proto(struct net *net)
-{
- return &net->ct.nf_ct_proto.sctp.pn;
+ /* timeouts[0] is unused, init it so ->timeouts[0] contains
+ * 'new' timeout, like udp or icmp.
+ */
+ sn->timeouts[0] = sctp_timeouts[SCTP_CONNTRACK_CLOSED];
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp = {
@@ -759,9 +661,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp = {
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = sctp_print_conntrack,
#endif
- .packet = sctp_packet,
.can_early_drop = sctp_can_early_drop,
- .me = THIS_MODULE,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.nlattr_size = SCTP_NLATTR_SIZE,
.to_nlattr = sctp_to_nlattr,
@@ -780,6 +680,4 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp = {
.nla_policy = sctp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = sctp_init_net,
- .get_net_proto = sctp_get_net_proto,
};
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 4dcbd51a8e97..01c748fa8913 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -829,11 +829,11 @@ static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
}
/* Returns verdict for packet, or -1 for invalid. */
-static int tcp_packet(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state)
+int nf_conntrack_tcp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
struct net *net = nf_ct_net(ct);
struct nf_tcp_net *tn = nf_tcp_pernet(net);
@@ -1387,146 +1387,21 @@ static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-static struct ctl_table tcp_sysctl_table[] = {
- {
- .procname = "nf_conntrack_tcp_timeout_syn_sent",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_timeout_syn_recv",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_timeout_established",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_timeout_fin_wait",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_timeout_close_wait",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_timeout_last_ack",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_timeout_time_wait",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_timeout_close",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_timeout_max_retrans",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_timeout_unacknowledged",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_loose",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "nf_conntrack_tcp_be_liberal",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "nf_conntrack_tcp_max_retrans",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- { }
-};
-#endif /* CONFIG_SYSCTL */
-
-static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn,
- struct nf_tcp_net *tn)
-{
-#ifdef CONFIG_SYSCTL
- if (pn->ctl_table)
- return 0;
-
- pn->ctl_table = kmemdup(tcp_sysctl_table,
- sizeof(tcp_sysctl_table),
- GFP_KERNEL);
- if (!pn->ctl_table)
- return -ENOMEM;
-
- pn->ctl_table[0].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT];
- pn->ctl_table[1].data = &tn->timeouts[TCP_CONNTRACK_SYN_RECV];
- pn->ctl_table[2].data = &tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
- pn->ctl_table[3].data = &tn->timeouts[TCP_CONNTRACK_FIN_WAIT];
- pn->ctl_table[4].data = &tn->timeouts[TCP_CONNTRACK_CLOSE_WAIT];
- pn->ctl_table[5].data = &tn->timeouts[TCP_CONNTRACK_LAST_ACK];
- pn->ctl_table[6].data = &tn->timeouts[TCP_CONNTRACK_TIME_WAIT];
- pn->ctl_table[7].data = &tn->timeouts[TCP_CONNTRACK_CLOSE];
- pn->ctl_table[8].data = &tn->timeouts[TCP_CONNTRACK_RETRANS];
- pn->ctl_table[9].data = &tn->timeouts[TCP_CONNTRACK_UNACK];
- pn->ctl_table[10].data = &tn->tcp_loose;
- pn->ctl_table[11].data = &tn->tcp_be_liberal;
- pn->ctl_table[12].data = &tn->tcp_max_retrans;
-#endif
- return 0;
-}
-
-static int tcp_init_net(struct net *net)
+void nf_conntrack_tcp_init_net(struct net *net)
{
struct nf_tcp_net *tn = nf_tcp_pernet(net);
- struct nf_proto_net *pn = &tn->pn;
-
- if (!pn->users) {
- int i;
-
- for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
- tn->timeouts[i] = tcp_timeouts[i];
-
- /* timeouts[0] is unused, make it same as SYN_SENT so
- * ->timeouts[0] contains 'new' timeout, like udp or icmp.
- */
- tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];
- tn->tcp_loose = nf_ct_tcp_loose;
- tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
- tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
- }
+ int i;
- return tcp_kmemdup_sysctl_table(pn, tn);
-}
+ for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
+ tn->timeouts[i] = tcp_timeouts[i];
-static struct nf_proto_net *tcp_get_net_proto(struct net *net)
-{
- return &net->ct.nf_ct_proto.tcp.pn;
+ /* timeouts[0] is unused, make it same as SYN_SENT so
+ * ->timeouts[0] contains 'new' timeout, like udp or icmp.
+ */
+ tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];
+ tn->tcp_loose = nf_ct_tcp_loose;
+ tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
+ tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp =
@@ -1535,7 +1410,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp =
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = tcp_print_conntrack,
#endif
- .packet = tcp_packet,
.can_early_drop = tcp_can_early_drop,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.to_nlattr = tcp_to_nlattr,
@@ -1556,6 +1430,4 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp =
.nla_policy = tcp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = tcp_init_net,
- .get_net_proto = tcp_get_net_proto,
};
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index b4f5d5e82031..951366dfbec3 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -85,11 +85,11 @@ static bool udp_error(struct sk_buff *skb,
}
/* Returns verdict for packet, and may modify conntracktype */
-static int udp_packet(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state)
+int nf_conntrack_udp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
unsigned int *timeouts;
@@ -177,11 +177,11 @@ static bool udplite_error(struct sk_buff *skb,
}
/* Returns verdict for packet, and may modify conntracktype */
-static int udplite_packet(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state)
+int nf_conntrack_udplite_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
unsigned int *timeouts;
@@ -260,66 +260,19 @@ udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = {
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-static struct ctl_table udp_sysctl_table[] = {
- {
- .procname = "nf_conntrack_udp_timeout",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_udp_timeout_stream",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- { }
-};
-#endif /* CONFIG_SYSCTL */
-
-static int udp_kmemdup_sysctl_table(struct nf_proto_net *pn,
- struct nf_udp_net *un)
-{
-#ifdef CONFIG_SYSCTL
- if (pn->ctl_table)
- return 0;
- pn->ctl_table = kmemdup(udp_sysctl_table,
- sizeof(udp_sysctl_table),
- GFP_KERNEL);
- if (!pn->ctl_table)
- return -ENOMEM;
- pn->ctl_table[0].data = &un->timeouts[UDP_CT_UNREPLIED];
- pn->ctl_table[1].data = &un->timeouts[UDP_CT_REPLIED];
-#endif
- return 0;
-}
-
-static int udp_init_net(struct net *net)
+void nf_conntrack_udp_init_net(struct net *net)
{
struct nf_udp_net *un = nf_udp_pernet(net);
- struct nf_proto_net *pn = &un->pn;
+ int i;
- if (!pn->users) {
- int i;
-
- for (i = 0; i < UDP_CT_MAX; i++)
- un->timeouts[i] = udp_timeouts[i];
- }
-
- return udp_kmemdup_sysctl_table(pn, un);
-}
-
-static struct nf_proto_net *udp_get_net_proto(struct net *net)
-{
- return &net->ct.nf_ct_proto.udp.pn;
+ for (i = 0; i < UDP_CT_MAX; i++)
+ un->timeouts[i] = udp_timeouts[i];
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp =
{
.l4proto = IPPROTO_UDP,
.allow_clash = true,
- .packet = udp_packet,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
@@ -335,8 +288,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp =
.nla_policy = udp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = udp_init_net,
- .get_net_proto = udp_get_net_proto,
};
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
@@ -344,7 +295,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite =
{
.l4proto = IPPROTO_UDPLITE,
.allow_clash = true,
- .packet = udplite_packet,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
@@ -360,7 +310,5 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite =
.nla_policy = udp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = udp_init_net,
- .get_net_proto = udp_get_net_proto,
};
#endif
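
A note on the three protocol files above: the pattern is identical for sctp, tcp, udp and udplite. The static .packet callbacks become exported nf_conntrack_*_packet() functions, the per-protocol sysctl templates and their kmemdup() helpers disappear, and each *_init_net() shrinks to seeding the per-netns timeout array. The exported names suggest the conntrack core now dispatches on the protocol number directly rather than through the removed .packet pointer. A minimal sketch of such a dispatcher, assuming the handler names from the hunks above (the sctp handler is not visible here and is inferred by analogy):

/* Sketch only: direct dispatch on the L4 protocol number instead of an
 * indirect ->packet() call. nf_conntrack_sctp_packet() is an assumed name.
 */
static int handle_packet(struct nf_conn *ct, struct sk_buff *skb,
			 unsigned int dataoff, enum ip_conntrack_info ctinfo,
			 const struct nf_hook_state *state)
{
	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		return nf_conntrack_tcp_packet(ct, skb, dataoff, ctinfo, state);
	case IPPROTO_UDP:
		return nf_conntrack_udp_packet(ct, skb, dataoff, ctinfo, state);
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
	case IPPROTO_UDPLITE:
		return nf_conntrack_udplite_packet(ct, skb, dataoff,
						   ctinfo, state);
#endif
	}
	return NF_ACCEPT;	/* other trackers handled elsewhere */
}
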
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index b6177fd73304..c2ae14c720b4 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -24,6 +24,10 @@
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <linux/rculist_nulls.h>
+static bool enable_hooks __read_mostly;
+MODULE_PARM_DESC(enable_hooks, "Always enable conntrack hooks");
+module_param(enable_hooks, bool, 0000);
+
unsigned int nf_conntrack_net_id __read_mostly;
#ifdef CONFIG_NF_CONNTRACK_PROCFS
@@ -310,8 +314,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
if (!net_eq(nf_ct_net(ct), net))
goto release;
- l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
- WARN_ON(!l4proto);
+ l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
ret = -ENOSPC;
seq_printf(s, "%-8s %u %-8s %u ",
@@ -547,8 +550,55 @@ enum nf_ct_sysctl_index {
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
NF_SYSCTL_CT_TIMESTAMP,
#endif
+ NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_SENT,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_RECV,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ESTABLISHED,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_FIN_WAIT,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE_WAIT,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_LAST_ACK,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_TIME_WAIT,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK,
+ NF_SYSCTL_CT_PROTO_TCP_LOOSE,
+ NF_SYSCTL_CT_PROTO_TCP_LIBERAL,
+ NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_UDP,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6,
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_CLOSED,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_WAIT,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_ECHOED,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ESTABLISHED,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_SENT,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED,
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_RESPOND,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_PARTOPEN,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_OPEN,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSEREQ,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSING,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_TIMEWAIT,
+ NF_SYSCTL_CT_PROTO_DCCP_LOOSE,
+#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ NF_SYSCTL_CT_PROTO_TIMEOUT_GRE,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM,
+#endif
+
+ __NF_SYSCTL_CT_LAST_SYSCTL,
};
+#define NF_SYSCTL_CT_LAST_SYSCTL (__NF_SYSCTL_CT_LAST_SYSCTL + 1)
+
static struct ctl_table nf_ct_sysctl_table[] = {
[NF_SYSCTL_CT_MAX] = {
.procname = "nf_conntrack_max",
@@ -626,7 +676,235 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.proc_handler = proc_dointvec,
},
#endif
- { }
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC] = {
+ .procname = "nf_conntrack_generic_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_SENT] = {
+ .procname = "nf_conntrack_tcp_timeout_syn_sent",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_RECV] = {
+ .procname = "nf_conntrack_tcp_timeout_syn_recv",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ESTABLISHED] = {
+ .procname = "nf_conntrack_tcp_timeout_established",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_FIN_WAIT] = {
+ .procname = "nf_conntrack_tcp_timeout_fin_wait",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE_WAIT] = {
+ .procname = "nf_conntrack_tcp_timeout_close_wait",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_LAST_ACK] = {
+ .procname = "nf_conntrack_tcp_timeout_last_ack",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_TIME_WAIT] = {
+ .procname = "nf_conntrack_tcp_timeout_time_wait",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE] = {
+ .procname = "nf_conntrack_tcp_timeout_close",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS] = {
+ .procname = "nf_conntrack_tcp_timeout_max_retrans",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK] = {
+ .procname = "nf_conntrack_tcp_timeout_unacknowledged",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TCP_LOOSE] = {
+ .procname = "nf_conntrack_tcp_loose",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ [NF_SYSCTL_CT_PROTO_TCP_LIBERAL] = {
+ .procname = "nf_conntrack_tcp_be_liberal",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ [NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS] = {
+ .procname = "nf_conntrack_tcp_max_retrans",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP] = {
+ .procname = "nf_conntrack_udp_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM] = {
+ .procname = "nf_conntrack_udp_timeout_stream",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP] = {
+ .procname = "nf_conntrack_icmp_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6] = {
+ .procname = "nf_conntrack_icmpv6_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_CLOSED] = {
+ .procname = "nf_conntrack_sctp_timeout_closed",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_WAIT] = {
+ .procname = "nf_conntrack_sctp_timeout_cookie_wait",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_ECHOED] = {
+ .procname = "nf_conntrack_sctp_timeout_cookie_echoed",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ESTABLISHED] = {
+ .procname = "nf_conntrack_sctp_timeout_established",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_SENT] = {
+ .procname = "nf_conntrack_sctp_timeout_shutdown_sent",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD] = {
+ .procname = "nf_conntrack_sctp_timeout_shutdown_recd",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT] = {
+ .procname = "nf_conntrack_sctp_timeout_shutdown_ack_sent",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT] = {
+ .procname = "nf_conntrack_sctp_timeout_heartbeat_sent",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED] = {
+ .procname = "nf_conntrack_sctp_timeout_heartbeat_acked",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST] = {
+ .procname = "nf_conntrack_dccp_timeout_request",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_RESPOND] = {
+ .procname = "nf_conntrack_dccp_timeout_respond",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_PARTOPEN] = {
+ .procname = "nf_conntrack_dccp_timeout_partopen",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_OPEN] = {
+ .procname = "nf_conntrack_dccp_timeout_open",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSEREQ] = {
+ .procname = "nf_conntrack_dccp_timeout_closereq",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSING] = {
+ .procname = "nf_conntrack_dccp_timeout_closing",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_TIMEWAIT] = {
+ .procname = "nf_conntrack_dccp_timeout_timewait",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_DCCP_LOOSE] = {
+ .procname = "nf_conntrack_dccp_loose",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_GRE] = {
+ .procname = "nf_conntrack_gre_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM] = {
+ .procname = "nf_conntrack_gre_timeout_stream",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+#endif
+ {}
};
static struct ctl_table nf_ct_netfilter_table[] = {
@@ -640,14 +918,103 @@ static struct ctl_table nf_ct_netfilter_table[] = {
{ }
};
+static void nf_conntrack_standalone_init_tcp_sysctl(struct net *net,
+ struct ctl_table *table)
+{
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
+
+#define XASSIGN(XNAME, tn) \
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ ## XNAME].data = \
+ &(tn)->timeouts[TCP_CONNTRACK_ ## XNAME]
+
+ XASSIGN(SYN_SENT, tn);
+ XASSIGN(SYN_RECV, tn);
+ XASSIGN(ESTABLISHED, tn);
+ XASSIGN(FIN_WAIT, tn);
+ XASSIGN(CLOSE_WAIT, tn);
+ XASSIGN(LAST_ACK, tn);
+ XASSIGN(TIME_WAIT, tn);
+ XASSIGN(CLOSE, tn);
+ XASSIGN(RETRANS, tn);
+ XASSIGN(UNACK, tn);
+#undef XASSIGN
+#define XASSIGN(XNAME, rval) \
+ table[NF_SYSCTL_CT_PROTO_TCP_ ## XNAME].data = (rval)
+
+ XASSIGN(LOOSE, &tn->tcp_loose);
+ XASSIGN(LIBERAL, &tn->tcp_be_liberal);
+ XASSIGN(MAX_RETRANS, &tn->tcp_max_retrans);
+#undef XASSIGN
+}
+
+static void nf_conntrack_standalone_init_sctp_sysctl(struct net *net,
+ struct ctl_table *table)
+{
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ struct nf_sctp_net *sn = nf_sctp_pernet(net);
+
+#define XASSIGN(XNAME, sn) \
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ ## XNAME].data = \
+ &(sn)->timeouts[SCTP_CONNTRACK_ ## XNAME]
+
+ XASSIGN(CLOSED, sn);
+ XASSIGN(COOKIE_WAIT, sn);
+ XASSIGN(COOKIE_ECHOED, sn);
+ XASSIGN(ESTABLISHED, sn);
+ XASSIGN(SHUTDOWN_SENT, sn);
+ XASSIGN(SHUTDOWN_RECD, sn);
+ XASSIGN(SHUTDOWN_ACK_SENT, sn);
+ XASSIGN(HEARTBEAT_SENT, sn);
+ XASSIGN(HEARTBEAT_ACKED, sn);
+#undef XASSIGN
+#endif
+}
+
+static void nf_conntrack_standalone_init_dccp_sysctl(struct net *net,
+ struct ctl_table *table)
+{
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ struct nf_dccp_net *dn = nf_dccp_pernet(net);
+
+#define XASSIGN(XNAME, dn) \
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_ ## XNAME].data = \
+ &(dn)->dccp_timeout[CT_DCCP_ ## XNAME]
+
+ XASSIGN(REQUEST, dn);
+ XASSIGN(RESPOND, dn);
+ XASSIGN(PARTOPEN, dn);
+ XASSIGN(OPEN, dn);
+ XASSIGN(CLOSEREQ, dn);
+ XASSIGN(CLOSING, dn);
+ XASSIGN(TIMEWAIT, dn);
+#undef XASSIGN
+
+ table[NF_SYSCTL_CT_PROTO_DCCP_LOOSE].data = &dn->dccp_loose;
+#endif
+}
+
+static void nf_conntrack_standalone_init_gre_sysctl(struct net *net,
+ struct ctl_table *table)
+{
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ struct nf_gre_net *gn = nf_gre_pernet(net);
+
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_GRE].data = &gn->timeouts[GRE_CT_UNREPLIED];
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM].data = &gn->timeouts[GRE_CT_REPLIED];
+#endif
+}
+
static int nf_conntrack_standalone_init_sysctl(struct net *net)
{
+ struct nf_udp_net *un = nf_udp_pernet(net);
struct ctl_table *table;
+ BUILD_BUG_ON(ARRAY_SIZE(nf_ct_sysctl_table) != NF_SYSCTL_CT_LAST_SYSCTL);
+
table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
GFP_KERNEL);
if (!table)
- goto out_kmemdup;
+ return -ENOMEM;
table[NF_SYSCTL_CT_COUNT].data = &net->ct.count;
table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum;
@@ -655,6 +1022,16 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
#ifdef CONFIG_NF_CONNTRACK_EVENTS
table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events;
#endif
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout;
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout;
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6].data = &nf_icmpv6_pernet(net)->timeout;
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP].data = &un->timeouts[UDP_CT_UNREPLIED];
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM].data = &un->timeouts[UDP_CT_REPLIED];
+
+ nf_conntrack_standalone_init_tcp_sysctl(net, table);
+ nf_conntrack_standalone_init_sctp_sysctl(net, table);
+ nf_conntrack_standalone_init_dccp_sysctl(net, table);
+ nf_conntrack_standalone_init_gre_sysctl(net, table);
/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns) {
@@ -680,7 +1057,6 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
out_unregister_netfilter:
kfree(table);
-out_kmemdup:
return -ENOMEM;
}
@@ -703,31 +1079,47 @@ static void nf_conntrack_standalone_fini_sysctl(struct net *net)
}
#endif /* CONFIG_SYSCTL */
+static void nf_conntrack_fini_net(struct net *net)
+{
+ if (enable_hooks)
+ nf_ct_netns_put(net, NFPROTO_INET);
+
+ nf_conntrack_standalone_fini_proc(net);
+ nf_conntrack_standalone_fini_sysctl(net);
+}
+
static int nf_conntrack_pernet_init(struct net *net)
{
int ret;
- ret = nf_conntrack_init_net(net);
+ net->ct.sysctl_checksum = 1;
+
+ ret = nf_conntrack_standalone_init_sysctl(net);
if (ret < 0)
- goto out_init;
+ return ret;
ret = nf_conntrack_standalone_init_proc(net);
if (ret < 0)
goto out_proc;
- net->ct.sysctl_checksum = 1;
- net->ct.sysctl_log_invalid = 0;
- ret = nf_conntrack_standalone_init_sysctl(net);
+ ret = nf_conntrack_init_net(net);
if (ret < 0)
- goto out_sysctl;
+ goto out_init_net;
+
+ if (enable_hooks) {
+ ret = nf_ct_netns_get(net, NFPROTO_INET);
+ if (ret < 0)
+ goto out_hooks;
+ }
return 0;
-out_sysctl:
+out_hooks:
+ nf_conntrack_cleanup_net(net);
+out_init_net:
nf_conntrack_standalone_fini_proc(net);
out_proc:
- nf_conntrack_cleanup_net(net);
-out_init:
+ nf_conntrack_standalone_fini_sysctl(net);
return ret;
}
@@ -735,10 +1127,9 @@ static void nf_conntrack_pernet_exit(struct list_head *net_exit_list)
{
struct net *net;
- list_for_each_entry(net, net_exit_list, exit_list) {
- nf_conntrack_standalone_fini_sysctl(net);
- nf_conntrack_standalone_fini_proc(net);
- }
+ list_for_each_entry(net, net_exit_list, exit_list)
+ nf_conntrack_fini_net(net);
+
nf_conntrack_cleanup_net_list(net_exit_list);
}
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index c0c72ae9df42..7aabfd4b1e50 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -121,7 +121,7 @@ static void flow_offload_fixup_ct_state(struct nf_conn *ct)
if (l4num == IPPROTO_TCP)
flow_offload_fixup_tcp(&ct->proto.tcp);
- l4proto = __nf_ct_l4proto_find(l4num);
+ l4proto = nf_ct_l4proto_find(l4num);
if (!l4proto)
return;
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index d159e9e7835b..35e61038ae96 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -146,7 +146,7 @@ hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
}
/* Is this tuple already taken? (not by us) */
-int
+static int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
const struct nf_conn *ignored_conntrack)
{
@@ -158,10 +158,9 @@ nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
*/
struct nf_conntrack_tuple reply;
- nf_ct_invert_tuplepr(&reply, tuple);
+ nf_ct_invert_tuple(&reply, tuple);
return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
-EXPORT_SYMBOL(nf_nat_used_tuple);
static bool nf_nat_inet_in_range(const struct nf_conntrack_tuple *t,
const struct nf_nat_range2 *range)
@@ -253,7 +252,7 @@ find_appropriate_src(struct net *net,
net_eq(net, nf_ct_net(ct)) &&
nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
/* Copy source part from reply tuple. */
- nf_ct_invert_tuplepr(result,
+ nf_ct_invert_tuple(result,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
result->dst = tuple->dst;
@@ -560,8 +559,8 @@ nf_nat_setup_info(struct nf_conn *ct,
* manipulations (future optimization: if num_manips == 0,
* orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
*/
- nf_ct_invert_tuplepr(&curr_tuple,
- &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+ nf_ct_invert_tuple(&curr_tuple,
+ &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);
@@ -569,7 +568,7 @@ nf_nat_setup_info(struct nf_conn *ct,
struct nf_conntrack_tuple reply;
/* Alter conntrack table so will recognize replies. */
- nf_ct_invert_tuplepr(&reply, &new_tuple);
+ nf_ct_invert_tuple(&reply, &new_tuple);
nf_conntrack_alter_reply(ct, &reply);
/* Non-atomic: we own this at the moment. */
@@ -640,7 +639,7 @@ static unsigned int nf_nat_manip_pkt(struct sk_buff *skb, struct nf_conn *ct,
struct nf_conntrack_tuple target;
/* We are aiming to look like inverse of other direction. */
- nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
+ nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
l3proto = __nf_nat_l3proto_find(target.src.l3num);
if (!l3proto->manip_pkt(skb, 0, &target, mtype))
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index fb07f6cfc719..e92bedd09cde 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -37,10 +37,16 @@ enum {
NFT_VALIDATE_DO,
};
+static struct rhltable nft_objname_ht;
+
static u32 nft_chain_hash(const void *data, u32 len, u32 seed);
static u32 nft_chain_hash_obj(const void *data, u32 len, u32 seed);
static int nft_chain_hash_cmp(struct rhashtable_compare_arg *, const void *);
+static u32 nft_objname_hash(const void *data, u32 len, u32 seed);
+static u32 nft_objname_hash_obj(const void *data, u32 len, u32 seed);
+static int nft_objname_hash_cmp(struct rhashtable_compare_arg *, const void *);
+
static const struct rhashtable_params nft_chain_ht_params = {
.head_offset = offsetof(struct nft_chain, rhlhead),
.key_offset = offsetof(struct nft_chain, name),
@@ -51,6 +57,15 @@ static const struct rhashtable_params nft_chain_ht_params = {
.automatic_shrinking = true,
};
+static const struct rhashtable_params nft_objname_ht_params = {
+ .head_offset = offsetof(struct nft_object, rhlhead),
+ .key_offset = offsetof(struct nft_object, key),
+ .hashfn = nft_objname_hash,
+ .obj_hashfn = nft_objname_hash_obj,
+ .obj_cmpfn = nft_objname_hash_cmp,
+ .automatic_shrinking = true,
+};
+
static void nft_validate_state_update(struct net *net, u8 new_validate_state)
{
switch (net->nft.validate_state) {
@@ -814,6 +829,34 @@ static int nft_chain_hash_cmp(struct rhashtable_compare_arg *arg,
return strcmp(chain->name, name);
}
+static u32 nft_objname_hash(const void *data, u32 len, u32 seed)
+{
+ const struct nft_object_hash_key *k = data;
+
+ seed ^= hash_ptr(k->table, 32);
+
+ return jhash(k->name, strlen(k->name), seed);
+}
+
+static u32 nft_objname_hash_obj(const void *data, u32 len, u32 seed)
+{
+ const struct nft_object *obj = data;
+
+ return nft_objname_hash(&obj->key, 0, seed);
+}
+
+static int nft_objname_hash_cmp(struct rhashtable_compare_arg *arg,
+ const void *ptr)
+{
+ const struct nft_object_hash_key *k = arg->key;
+ const struct nft_object *obj = ptr;
+
+ if (obj->key.table != k->table)
+ return -1;
+
+ return strcmp(obj->key.name, k->name);
+}
+
static int nf_tables_newtable(struct net *net, struct sock *nlsk,
struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const nla[],
@@ -1070,7 +1113,7 @@ nft_chain_lookup_byhandle(const struct nft_table *table, u64 handle, u8 genmask)
return ERR_PTR(-ENOENT);
}
-static bool lockdep_commit_lock_is_held(struct net *net)
+static bool lockdep_commit_lock_is_held(const struct net *net)
{
#ifdef CONFIG_PROVE_LOCKING
return lockdep_is_held(&net->nft.commit_mutex);
@@ -2565,6 +2608,9 @@ static int nft_table_validate(struct net *net, const struct nft_table *table)
return 0;
}
+static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
+ const struct nlattr *nla);
+
#define NFT_RULE_MAXEXPRS 128
static int nf_tables_newrule(struct net *net, struct sock *nlsk,
@@ -2634,6 +2680,12 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION]);
return PTR_ERR(old_rule);
}
+ } else if (nla[NFTA_RULE_POSITION_ID]) {
+ old_rule = nft_rule_lookup_byid(net, nla[NFTA_RULE_POSITION_ID]);
+ if (IS_ERR(old_rule)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION_ID]);
+ return PTR_ERR(old_rule);
+ }
}
}
@@ -3851,7 +3903,7 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) &&
nla_put_string(skb, NFTA_SET_ELEM_OBJREF,
- (*nft_set_ext_obj(ext))->name) < 0)
+ (*nft_set_ext_obj(ext))->key.name) < 0)
goto nla_put_failure;
if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
@@ -4384,7 +4436,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
err = -EINVAL;
goto err2;
}
- obj = nft_obj_lookup(ctx->table, nla[NFTA_SET_ELEM_OBJREF],
+ obj = nft_obj_lookup(ctx->net, ctx->table,
+ nla[NFTA_SET_ELEM_OBJREF],
set->objtype, genmask);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
@@ -4819,18 +4872,36 @@ void nft_unregister_obj(struct nft_object_type *obj_type)
}
EXPORT_SYMBOL_GPL(nft_unregister_obj);
-struct nft_object *nft_obj_lookup(const struct nft_table *table,
+struct nft_object *nft_obj_lookup(const struct net *net,
+ const struct nft_table *table,
const struct nlattr *nla, u32 objtype,
u8 genmask)
{
+ struct nft_object_hash_key k = { .table = table };
+ char search[NFT_OBJ_MAXNAMELEN];
+ struct rhlist_head *tmp, *list;
struct nft_object *obj;
- list_for_each_entry_rcu(obj, &table->objects, list) {
- if (!nla_strcmp(nla, obj->name) &&
- objtype == obj->ops->type->type &&
- nft_active_genmask(obj, genmask))
+ nla_strlcpy(search, nla, sizeof(search));
+ k.name = search;
+
+ WARN_ON_ONCE(!rcu_read_lock_held() &&
+ !lockdep_commit_lock_is_held(net));
+
+ rcu_read_lock();
+ list = rhltable_lookup(&nft_objname_ht, &k, nft_objname_ht_params);
+ if (!list)
+ goto out;
+
+ rhl_for_each_entry_rcu(obj, tmp, list, rhlhead) {
+ if (objtype == obj->ops->type->type &&
+ nft_active_genmask(obj, genmask)) {
+ rcu_read_unlock();
return obj;
+ }
}
+out:
+ rcu_read_unlock();
return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(nft_obj_lookup);
@@ -4988,7 +5059,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
}
objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
- obj = nft_obj_lookup(table, nla[NFTA_OBJ_NAME], objtype, genmask);
+ obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
if (err != -ENOENT) {
@@ -5014,11 +5085,11 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
err = PTR_ERR(obj);
goto err1;
}
- obj->table = table;
+ obj->key.table = table;
obj->handle = nf_tables_alloc_handle(table);
- obj->name = nla_strdup(nla[NFTA_OBJ_NAME], GFP_KERNEL);
- if (!obj->name) {
+ obj->key.name = nla_strdup(nla[NFTA_OBJ_NAME], GFP_KERNEL);
+ if (!obj->key.name) {
err = -ENOMEM;
goto err2;
}
@@ -5027,11 +5098,20 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
if (err < 0)
goto err3;
+ err = rhltable_insert(&nft_objname_ht, &obj->rhlhead,
+ nft_objname_ht_params);
+ if (err < 0)
+ goto err4;
+
list_add_tail_rcu(&obj->list, &table->objects);
table->use++;
return 0;
+err4:
+ /* queued in transaction log */
+ INIT_LIST_HEAD(&obj->list);
+ return err;
err3:
- kfree(obj->name);
+ kfree(obj->key.name);
err2:
if (obj->ops->destroy)
obj->ops->destroy(&ctx, obj);
@@ -5060,7 +5140,7 @@ static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net,
nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
if (nla_put_string(skb, NFTA_OBJ_TABLE, table->name) ||
- nla_put_string(skb, NFTA_OBJ_NAME, obj->name) ||
+ nla_put_string(skb, NFTA_OBJ_NAME, obj->key.name) ||
nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) ||
nla_put_be32(skb, NFTA_OBJ_USE, htonl(obj->use)) ||
nft_object_dump(skb, NFTA_OBJ_DATA, obj, reset) ||
@@ -5215,7 +5295,7 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk,
}
objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
- obj = nft_obj_lookup(table, nla[NFTA_OBJ_NAME], objtype, genmask);
+ obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask);
if (IS_ERR(obj)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_NAME]);
return PTR_ERR(obj);
@@ -5246,7 +5326,7 @@ static void nft_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj)
obj->ops->destroy(ctx, obj);
module_put(obj->ops->type->owner);
- kfree(obj->name);
+ kfree(obj->key.name);
kfree(obj);
}
@@ -5280,7 +5360,7 @@ static int nf_tables_delobj(struct net *net, struct sock *nlsk,
obj = nft_obj_lookup_byhandle(table, attr, objtype, genmask);
} else {
attr = nla[NFTA_OBJ_NAME];
- obj = nft_obj_lookup(table, attr, objtype, genmask);
+ obj = nft_obj_lookup(net, table, attr, objtype, genmask);
}
if (IS_ERR(obj)) {
@@ -5297,7 +5377,7 @@ static int nf_tables_delobj(struct net *net, struct sock *nlsk,
return nft_delobj(&ctx, obj);
}
-void nft_obj_notify(struct net *net, struct nft_table *table,
+void nft_obj_notify(struct net *net, const struct nft_table *table,
struct nft_object *obj, u32 portid, u32 seq, int event,
int family, int report, gfp_t gfp)
{
@@ -6404,6 +6484,12 @@ static void nf_tables_commit_chain(struct net *net, struct nft_chain *chain)
nf_tables_commit_chain_free_rules_old(g0);
}
+static void nft_obj_del(struct nft_object *obj)
+{
+ rhltable_remove(&nft_objname_ht, &obj->rhlhead, nft_objname_ht_params);
+ list_del_rcu(&obj->list);
+}
+
static void nft_chain_del(struct nft_chain *chain)
{
struct nft_table *table = chain->table;
@@ -6580,7 +6666,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
nft_trans_destroy(trans);
break;
case NFT_MSG_DELOBJ:
- list_del_rcu(&nft_trans_obj(trans)->list);
+ nft_obj_del(nft_trans_obj(trans));
nf_tables_obj_notify(&trans->ctx, nft_trans_obj(trans),
NFT_MSG_DELOBJ);
break;
@@ -6716,7 +6802,7 @@ static int __nf_tables_abort(struct net *net)
break;
case NFT_MSG_NEWOBJ:
trans->ctx.table->use--;
- list_del_rcu(&nft_trans_obj(trans)->list);
+ nft_obj_del(nft_trans_obj(trans));
break;
case NFT_MSG_DELOBJ:
trans->ctx.table->use++;
@@ -7330,7 +7416,7 @@ static void __nft_release_tables(struct net *net)
nft_set_destroy(set);
}
list_for_each_entry_safe(obj, ne, &table->objects, list) {
- list_del(&obj->list);
+ nft_obj_del(obj);
table->use--;
nft_obj_destroy(&ctx, obj);
}
@@ -7392,12 +7478,18 @@ static int __init nf_tables_module_init(void)
if (err < 0)
goto err3;
+ err = rhltable_init(&nft_objname_ht, &nft_objname_ht_params);
+ if (err < 0)
+ goto err4;
+
/* must be last */
err = nfnetlink_subsys_register(&nf_tables_subsys);
if (err < 0)
- goto err4;
+ goto err5;
return err;
+err5:
+ rhltable_destroy(&nft_objname_ht);
err4:
unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
err3:
@@ -7417,6 +7509,7 @@ static void __exit nf_tables_module_exit(void)
unregister_pernet_subsys(&nf_tables_net_ops);
cancel_work_sync(&trans_destroy_work);
rcu_barrier();
+ rhltable_destroy(&nft_objname_ht);
nf_tables_core_module_exit();
}
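
Object lookup by name previously walked table->objects linearly; it now goes through an rhltable keyed by the (table pointer, object name) pair. nft_objname_hash() folds hash_ptr(k->table, 32) into the jhash seed so the same name in two tables hashes differently, and the compare callback still checks both fields. An rhltable rather than a plain rhashtable is used because duplicates per key are legal here: objects in different generations can briefly coexist under one name during a transaction. A minimal sketch of the duplicate-key rhltable API, with hypothetical types:

/* Sketch: rhltable keeps a chain of entries per key; lookup returns the
 * chain head, walked under RCU. Types and names are hypothetical.
 */
struct item {
	u32			key;
	struct rhlist_head	node;
};

static const struct rhashtable_params item_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct item, key),
	.head_offset	= offsetof(struct item, node),
	.automatic_shrinking = true,
};

static struct item *item_first(struct rhltable *ht, u32 key)
{
	struct rhlist_head *tmp, *list;
	struct item *it;

	rcu_read_lock();
	list = rhltable_lookup(ht, &key, item_params);
	rhl_for_each_entry_rcu(it, tmp, list, node) {
		rcu_read_unlock();
		/* lifetime past the RCU section is the caller's problem,
		 * as in nft_obj_lookup() above (commit mutex held)
		 */
		return it;
	}
	rcu_read_unlock();
	return NULL;
}

/* insert/remove mirror the nft_objname_ht calls above:
 *	rhltable_insert(ht, &it->node, item_params);
 *	rhltable_remove(ht, &it->node, item_params);
 */
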
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index a50500232b0a..2a00aef7b6d4 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -124,14 +124,25 @@ static void expr_call_ops_eval(const struct nft_expr *expr,
struct nft_regs *regs,
struct nft_pktinfo *pkt)
{
+#ifdef CONFIG_RETPOLINE
unsigned long e = (unsigned long)expr->ops->eval;
-
- if (e == (unsigned long)nft_meta_get_eval)
- nft_meta_get_eval(expr, regs, pkt);
- else if (e == (unsigned long)nft_lookup_eval)
- nft_lookup_eval(expr, regs, pkt);
- else
- expr->ops->eval(expr, regs, pkt);
+#define X(e, fun) \
+ do { if ((e) == (unsigned long)(fun)) \
+ return fun(expr, regs, pkt); } while (0)
+
+ X(e, nft_payload_eval);
+ X(e, nft_cmp_eval);
+ X(e, nft_meta_get_eval);
+ X(e, nft_lookup_eval);
+ X(e, nft_range_eval);
+ X(e, nft_immediate_eval);
+ X(e, nft_byteorder_eval);
+ X(e, nft_dynset_eval);
+ X(e, nft_rt_get_eval);
+ X(e, nft_bitwise_eval);
+#undef X
+#endif /* CONFIG_RETPOLINE */
+ expr->ops->eval(expr, regs, pkt);
}
unsigned int
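
Under CONFIG_RETPOLINE every indirect call goes through a retpoline thunk, which is costly on the per-expression hot path, so the interpreter compares expr->ops->eval against the addresses of the common evaluators and calls the match directly; the X() macro keeps the ladder readable and #undef confines it to this function. The renames in the files below (nft_bitwise, nft_byteorder, nft_cmp and so on dropping static) exist to make those evaluators visible here. The same devirtualization trick in isolation, with hypothetical handlers:

/* Sketch: turn a hot indirect call into direct calls when the target is
 * one of a few well-known functions (hypothetical names).
 */
struct ctx;
struct handler {
	void (*fn)(struct ctx *ctx);
};

static void handle_fast_a(struct ctx *ctx) { /* ... */ }
static void handle_fast_b(struct ctx *ctx) { /* ... */ }

static void dispatch(const struct handler *h, struct ctx *ctx)
{
#ifdef CONFIG_RETPOLINE
	unsigned long e = (unsigned long)h->fn;
#define X(e, fun)						\
	do { if ((e) == (unsigned long)(fun))			\
		return fun(ctx); } while (0)

	X(e, handle_fast_a);
	X(e, handle_fast_b);
#undef X
#endif
	h->fn(ctx);	/* rare handlers still take the indirect call */
}
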
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 109b0d27345a..c69b11ca5aad 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -122,7 +122,7 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl,
return -EBUSY;
}
- l4proto = nf_ct_l4proto_find_get(l4num);
+ l4proto = nf_ct_l4proto_find(l4num);
/* This protocol is not supported, skip. */
if (l4proto->l4proto != l4num) {
@@ -152,7 +152,6 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl,
err:
kfree(timeout);
err_proto_put:
- nf_ct_l4proto_put(l4proto);
return ret;
}
@@ -302,7 +301,6 @@ static int ctnl_timeout_try_del(struct net *net, struct ctnl_timeout *timeout)
if (refcount_dec_if_one(&timeout->refcnt)) {
/* We are protected by nfnl mutex. */
list_del_rcu(&timeout->head);
- nf_ct_l4proto_put(timeout->timeout.l4proto);
nf_ct_untimeout(net, &timeout->timeout);
kfree_rcu(timeout, rcu_head);
} else {
@@ -359,7 +357,7 @@ static int cttimeout_default_set(struct net *net, struct sock *ctnl,
return -EINVAL;
l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
- l4proto = nf_ct_l4proto_find_get(l4num);
+ l4proto = nf_ct_l4proto_find(l4num);
/* This protocol is not supported, skip. */
if (l4proto->l4proto != l4num) {
@@ -372,10 +370,8 @@ static int cttimeout_default_set(struct net *net, struct sock *ctnl,
if (ret < 0)
goto err;
- nf_ct_l4proto_put(l4proto);
return 0;
err:
- nf_ct_l4proto_put(l4proto);
return ret;
}
@@ -442,7 +438,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
- l4proto = nf_ct_l4proto_find_get(l4num);
+ l4proto = nf_ct_l4proto_find(l4num);
err = -EOPNOTSUPP;
if (l4proto->l4proto != l4num)
@@ -474,12 +470,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
break;
case IPPROTO_GRE:
#ifdef CONFIG_NF_CT_PROTO_GRE
- if (l4proto->net_id) {
- struct netns_proto_gre *net_gre;
-
- net_gre = net_generic(net, *l4proto->net_id);
- timeouts = net_gre->gre_timeouts;
- }
+ timeouts = nf_gre_pernet(net)->timeouts;
#endif
break;
case 255:
@@ -516,7 +507,6 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
/* this avoids a loop in nfnetlink. */
return ret == -EAGAIN ? -ENOBUFS : ret;
err:
- nf_ct_l4proto_put(l4proto);
return err;
}
@@ -597,7 +587,6 @@ static void __net_exit cttimeout_net_exit(struct net *net)
list_for_each_entry_safe(cur, tmp, &net->nfct_timeout_list, head) {
list_del_rcu(&cur->head);
- nf_ct_l4proto_put(cur->timeout.l4proto);
if (refcount_dec_and_test(&cur->refcnt))
kfree_rcu(cur, rcu_head);
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
index fff8073e2a56..2c75b9e0474e 100644
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -25,9 +25,8 @@ struct nft_bitwise {
struct nft_data xor;
};
-static void nft_bitwise_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_bitwise_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt)
{
const struct nft_bitwise *priv = nft_expr_priv(expr);
const u32 *src = &regs->data[priv->sreg];
diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
index 13d4e421a6b3..19dbc34cc75e 100644
--- a/net/netfilter/nft_byteorder.c
+++ b/net/netfilter/nft_byteorder.c
@@ -26,9 +26,9 @@ struct nft_byteorder {
u8 size;
};
-static void nft_byteorder_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_byteorder_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
const struct nft_byteorder *priv = nft_expr_priv(expr);
u32 *src = &regs->data[priv->sreg];
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index 79d48c1d06f4..f9f1fa66a16e 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -24,9 +24,9 @@ struct nft_cmp_expr {
enum nft_cmp_ops op:8;
};
-static void nft_cmp_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_cmp_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
const struct nft_cmp_expr *priv = nft_expr_priv(expr);
int d;
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index a61d7edfc290..1a6b06ce6b5b 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -104,7 +104,7 @@ static void nft_counter_obj_destroy(const struct nft_ctx *ctx,
nft_counter_do_destroy(priv);
}
-static void nft_counter_reset(struct nft_counter_percpu_priv __percpu *priv,
+static void nft_counter_reset(struct nft_counter_percpu_priv *priv,
struct nft_counter *total)
{
struct nft_counter *this_cpu;
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 586627c361df..7b717fad6cdc 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -870,7 +870,7 @@ static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
l4num = nla_get_u8(tb[NFTA_CT_TIMEOUT_L4PROTO]);
priv->l4proto = l4num;
- l4proto = nf_ct_l4proto_find_get(l4num);
+ l4proto = nf_ct_l4proto_find(l4num);
if (l4proto->l4proto != l4num) {
ret = -EOPNOTSUPP;
@@ -902,7 +902,6 @@ static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
err_free_timeout:
kfree(timeout);
err_proto_put:
- nf_ct_l4proto_put(l4proto);
return ret;
}
@@ -913,7 +912,6 @@ static void nft_ct_timeout_obj_destroy(const struct nft_ctx *ctx,
struct nf_ct_timeout *timeout = priv->timeout;
nf_ct_untimeout(ctx->net, timeout);
- nf_ct_l4proto_put(timeout->l4proto);
nf_ct_netns_put(ctx->net, ctx->family);
kfree(priv->timeout);
}
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 07d4efd3d851..9658493d37d4 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -62,9 +62,8 @@ err1:
return NULL;
}
-static void nft_dynset_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_dynset_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt)
{
const struct nft_dynset *priv = nft_expr_priv(expr);
struct nft_set *set = priv->set;
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index c2d237144f74..ea658e6c53e3 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -25,7 +25,6 @@ struct nft_jhash {
u32 modulus;
u32 seed;
u32 offset;
- struct nft_set *map;
};
static void nft_jhash_eval(const struct nft_expr *expr,
@@ -42,33 +41,10 @@ static void nft_jhash_eval(const struct nft_expr *expr,
regs->data[priv->dreg] = h + priv->offset;
}
-static void nft_jhash_map_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
-{
- struct nft_jhash *priv = nft_expr_priv(expr);
- const void *data = &regs->data[priv->sreg];
- const struct nft_set *map = priv->map;
- const struct nft_set_ext *ext;
- u32 result;
- bool found;
-
- result = reciprocal_scale(jhash(data, priv->len, priv->seed),
- priv->modulus) + priv->offset;
-
- found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
- if (!found)
- return;
-
- nft_data_copy(&regs->data[priv->dreg],
- nft_set_ext_data(ext), map->dlen);
-}
-
struct nft_symhash {
enum nft_registers dreg:8;
u32 modulus;
u32 offset;
- struct nft_set *map;
};
static void nft_symhash_eval(const struct nft_expr *expr,
@@ -84,28 +60,6 @@ static void nft_symhash_eval(const struct nft_expr *expr,
regs->data[priv->dreg] = h + priv->offset;
}
-static void nft_symhash_map_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
-{
- struct nft_symhash *priv = nft_expr_priv(expr);
- struct sk_buff *skb = pkt->skb;
- const struct nft_set *map = priv->map;
- const struct nft_set_ext *ext;
- u32 result;
- bool found;
-
- result = reciprocal_scale(__skb_get_hash_symmetric(skb),
- priv->modulus) + priv->offset;
-
- found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
- if (!found)
- return;
-
- nft_data_copy(&regs->data[priv->dreg],
- nft_set_ext_data(ext), map->dlen);
-}
-
static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = {
[NFTA_HASH_SREG] = { .type = NLA_U32 },
[NFTA_HASH_DREG] = { .type = NLA_U32 },
@@ -114,9 +68,6 @@ static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = {
[NFTA_HASH_SEED] = { .type = NLA_U32 },
[NFTA_HASH_OFFSET] = { .type = NLA_U32 },
[NFTA_HASH_TYPE] = { .type = NLA_U32 },
- [NFTA_HASH_SET_NAME] = { .type = NLA_STRING,
- .len = NFT_SET_MAXNAMELEN - 1 },
- [NFTA_HASH_SET_ID] = { .type = NLA_U32 },
};
static int nft_jhash_init(const struct nft_ctx *ctx,
@@ -166,20 +117,6 @@ static int nft_jhash_init(const struct nft_ctx *ctx,
NFT_DATA_VALUE, sizeof(u32));
}
-static int nft_jhash_map_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
-{
- struct nft_jhash *priv = nft_expr_priv(expr);
- u8 genmask = nft_genmask_next(ctx->net);
-
- nft_jhash_init(ctx, expr, tb);
- priv->map = nft_set_lookup_global(ctx->net, ctx->table,
- tb[NFTA_HASH_SET_NAME],
- tb[NFTA_HASH_SET_ID], genmask);
- return PTR_ERR_OR_ZERO(priv->map);
-}
-
static int nft_symhash_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
@@ -206,20 +143,6 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
NFT_DATA_VALUE, sizeof(u32));
}
-static int nft_symhash_map_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
-{
- struct nft_jhash *priv = nft_expr_priv(expr);
- u8 genmask = nft_genmask_next(ctx->net);
-
- nft_symhash_init(ctx, expr, tb);
- priv->map = nft_set_lookup_global(ctx->net, ctx->table,
- tb[NFTA_HASH_SET_NAME],
- tb[NFTA_HASH_SET_ID], genmask);
- return PTR_ERR_OR_ZERO(priv->map);
-}
-
static int nft_jhash_dump(struct sk_buff *skb,
const struct nft_expr *expr)
{
@@ -247,18 +170,6 @@ nla_put_failure:
return -1;
}
-static int nft_jhash_map_dump(struct sk_buff *skb,
- const struct nft_expr *expr)
-{
- const struct nft_jhash *priv = nft_expr_priv(expr);
-
- if (nft_jhash_dump(skb, expr) ||
- nla_put_string(skb, NFTA_HASH_SET_NAME, priv->map->name))
- return -1;
-
- return 0;
-}
-
static int nft_symhash_dump(struct sk_buff *skb,
const struct nft_expr *expr)
{
@@ -279,18 +190,6 @@ nla_put_failure:
return -1;
}
-static int nft_symhash_map_dump(struct sk_buff *skb,
- const struct nft_expr *expr)
-{
- const struct nft_symhash *priv = nft_expr_priv(expr);
-
- if (nft_symhash_dump(skb, expr) ||
- nla_put_string(skb, NFTA_HASH_SET_NAME, priv->map->name))
- return -1;
-
- return 0;
-}
-
static struct nft_expr_type nft_hash_type;
static const struct nft_expr_ops nft_jhash_ops = {
.type = &nft_hash_type,
@@ -300,14 +199,6 @@ static const struct nft_expr_ops nft_jhash_ops = {
.dump = nft_jhash_dump,
};
-static const struct nft_expr_ops nft_jhash_map_ops = {
- .type = &nft_hash_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_jhash)),
- .eval = nft_jhash_map_eval,
- .init = nft_jhash_map_init,
- .dump = nft_jhash_map_dump,
-};
-
static const struct nft_expr_ops nft_symhash_ops = {
.type = &nft_hash_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_symhash)),
@@ -316,14 +207,6 @@ static const struct nft_expr_ops nft_symhash_ops = {
.dump = nft_symhash_dump,
};
-static const struct nft_expr_ops nft_symhash_map_ops = {
- .type = &nft_hash_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_symhash)),
- .eval = nft_symhash_map_eval,
- .init = nft_symhash_map_init,
- .dump = nft_symhash_map_dump,
-};
-
static const struct nft_expr_ops *
nft_hash_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
@@ -336,12 +219,8 @@ nft_hash_select_ops(const struct nft_ctx *ctx,
type = ntohl(nla_get_be32(tb[NFTA_HASH_TYPE]));
switch (type) {
case NFT_HASH_SYM:
- if (tb[NFTA_HASH_SET_NAME])
- return &nft_symhash_map_ops;
return &nft_symhash_ops;
case NFT_HASH_JENKINS:
- if (tb[NFTA_HASH_SET_NAME])
- return &nft_jhash_map_ops;
return &nft_jhash_ops;
default:
break;
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 0777a93211e2..3e5ed787b1d4 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -17,9 +17,9 @@
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
-static void nft_immediate_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_immediate_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 6df486c5ebd3..987d2d6ce624 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -244,6 +244,16 @@ void nft_meta_get_eval(const struct nft_expr *expr,
strncpy((char *)dest, p->br->dev->name, IFNAMSIZ);
return;
#endif
+ case NFT_META_IIFKIND:
+ if (in == NULL || in->rtnl_link_ops == NULL)
+ goto err;
+ strncpy((char *)dest, in->rtnl_link_ops->kind, IFNAMSIZ);
+ break;
+ case NFT_META_OIFKIND:
+ if (out == NULL || out->rtnl_link_ops == NULL)
+ goto err;
+ strncpy((char *)dest, out->rtnl_link_ops->kind, IFNAMSIZ);
+ break;
default:
WARN_ON(1);
goto err;
@@ -340,6 +350,8 @@ static int nft_meta_get_init(const struct nft_ctx *ctx,
break;
case NFT_META_IIFNAME:
case NFT_META_OIFNAME:
+ case NFT_META_IIFKIND:
+ case NFT_META_OIFKIND:
len = IFNAMSIZ;
break;
case NFT_META_PRANDOM:
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index a3185ca2a3a9..c1f2adf198a0 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -38,7 +38,8 @@ static int nft_objref_init(const struct nft_ctx *ctx,
return -EINVAL;
objtype = ntohl(nla_get_be32(tb[NFTA_OBJREF_IMM_TYPE]));
- obj = nft_obj_lookup(ctx->table, tb[NFTA_OBJREF_IMM_NAME], objtype,
+ obj = nft_obj_lookup(ctx->net, ctx->table,
+ tb[NFTA_OBJREF_IMM_NAME], objtype,
genmask);
if (IS_ERR(obj))
return -ENOENT;
@@ -53,7 +54,7 @@ static int nft_objref_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_object *obj = nft_objref_priv(expr);
- if (nla_put_string(skb, NFTA_OBJREF_IMM_NAME, obj->name) ||
+ if (nla_put_string(skb, NFTA_OBJREF_IMM_NAME, obj->key.name) ||
nla_put_be32(skb, NFTA_OBJREF_IMM_TYPE,
htonl(obj->ops->type->type)))
goto nla_put_failure;
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index e110b0ebbf58..54e15de4b79a 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -70,9 +70,9 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
-static void nft_payload_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_payload_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
const struct nft_payload *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c
index 0ed124a93fcf..354cde67bca9 100644
--- a/net/netfilter/nft_quota.c
+++ b/net/netfilter/nft_quota.c
@@ -61,7 +61,7 @@ static void nft_quota_obj_eval(struct nft_object *obj,
if (overquota &&
!test_and_set_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags))
- nft_obj_notify(nft_net(pkt), obj->table, obj, 0, 0,
+ nft_obj_notify(nft_net(pkt), obj->key.table, obj, 0, 0,
NFT_MSG_NEWOBJ, nft_pf(pkt), 0, GFP_ATOMIC);
}
diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
index cedb96c3619f..529ac8acb19d 100644
--- a/net/netfilter/nft_range.c
+++ b/net/netfilter/nft_range.c
@@ -23,9 +23,8 @@ struct nft_range_expr {
enum nft_range_ops op:8;
};
-static void nft_range_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_range_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt)
{
const struct nft_range_expr *priv = nft_expr_priv(expr);
int d1, d2;
diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c
index f35fa33913ae..c48daed5c46b 100644
--- a/net/netfilter/nft_rt.c
+++ b/net/netfilter/nft_rt.c
@@ -53,9 +53,9 @@ static u16 get_tcpmss(const struct nft_pktinfo *pkt, const struct dst_entry *skb
return mtu - minlen;
}
-static void nft_rt_get_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_rt_get_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
const struct nft_rt *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c
index e8da9a9bba73..55af9f247993 100644
--- a/net/netfilter/utils.c
+++ b/net/netfilter/utils.c
@@ -180,6 +180,25 @@ int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
}
EXPORT_SYMBOL_GPL(nf_route);
+static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
+{
+#ifdef CONFIG_INET
+ const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
+
+ if (entry->state.hook == NF_INET_LOCAL_OUT) {
+ const struct iphdr *iph = ip_hdr(skb);
+
+ if (!(iph->tos == rt_info->tos &&
+ skb->mark == rt_info->mark &&
+ iph->daddr == rt_info->daddr &&
+ iph->saddr == rt_info->saddr))
+ return ip_route_me_harder(entry->state.net, skb,
+ RTN_UNSPEC);
+ }
+#endif
+ return 0;
+}
+
int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
{
const struct nf_ipv6_ops *v6ops;
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 2c7a4b80206f..0fa863f57575 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -159,7 +159,7 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
/* Make sure the timeout policy matches any existing protocol tracker,
* otherwise default to generic.
*/
- l4proto = __nf_ct_l4proto_find(proto);
+ l4proto = nf_ct_l4proto_find(proto);
if (timeout->l4proto->l4proto != l4proto->l4proto) {
ret = -EINVAL;
pr_info_ratelimited("Timeout policy `%s' can only be used by L%d protocol number %d\n",
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index 4034d70bff39..b2e39cb6a590 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -96,8 +96,7 @@ match_outdev:
static int physdev_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_physdev_info *info = par->matchinfo;
-
- br_netfilter_enable();
+ static bool brnf_probed __read_mostly;
if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
info->bitmask & ~XT_PHYSDEV_OP_MASK)
@@ -111,6 +110,12 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
return -EINVAL;
}
+
+ if (!brnf_probed) {
+ brnf_probed = true;
+ request_module("br_netfilter");
+ }
+
return 0;
}
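
xt_physdev previously pulled in br_netfilter via a hard br_netfilter_enable() call; the checkentry hook now loads it on demand, and the function-local static bool brnf_probed makes the (potentially sleeping) request_module() a one-shot per boot. The pattern in isolation, with hypothetical names:

/* Sketch: lazy, at-most-once module probe from a checkentry-style hook
 * (process context, so request_module() may sleep).
 */
static int my_check(void)
{
	static bool probed __read_mostly;

	if (!probed) {
		probed = true;	/* even a failed probe is not retried */
		request_module("br_netfilter");
	}
	return 0;
}
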
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 3c023d6120f6..8fa35df94c07 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1371,6 +1371,14 @@ int netlink_has_listeners(struct sock *sk, unsigned int group)
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);
+bool netlink_strict_get_check(struct sk_buff *skb)
+{
+ const struct netlink_sock *nlk = nlk_sk(NETLINK_CB(skb).sk);
+
+ return nlk->flags & NETLINK_F_STRICT_CHK;
+}
+EXPORT_SYMBOL_GPL(netlink_strict_get_check);
+
static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
struct netlink_sock *nlk = nlk_sk(sk);
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index cd94f925495a..35884f836260 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -622,7 +622,7 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
if (natted) {
struct nf_conntrack_tuple inverse;
- if (!nf_ct_invert_tuplepr(&inverse, &tuple)) {
+ if (!nf_ct_invert_tuple(&inverse, &tuple)) {
pr_debug("ovs_ct_find_existing: Inversion failed!\n");
return NULL;
}
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index c038e021a591..43849d752a1e 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -206,8 +206,7 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
return ERR_PTR(-EINVAL);
/* Allocate and set up the meter before locking anything. */
- meter = kzalloc(n_bands * sizeof(struct dp_meter_band) +
- sizeof(*meter), GFP_KERNEL);
+ meter = kzalloc(struct_size(meter, bands, n_bands), GFP_KERNEL);
if (!meter)
return ERR_PTR(-ENOMEM);
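[Editor's note] struct_size() computes the size of a structure plus n trailing flexible-array elements while guarding the multiplication against overflow, which the open-coded expression it replaces did not. A hedged user-space analogue using the compiler's overflow builtins in place of the kernel helper:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the meter's trailing-array layout. */
struct band { uint64_t rate, burst; };
struct meter {
        uint32_t    n_bands;
        struct band bands[];    /* flexible array member */
};

/* Analogue of struct_size(meter, bands, n): header plus n elements,
 * saturating to SIZE_MAX on overflow so the allocation below fails
 * cleanly instead of being undersized. */
static size_t meter_size(size_t n)
{
        size_t bytes;

        if (__builtin_mul_overflow(n, sizeof(struct band), &bytes) ||
            __builtin_add_overflow(bytes, sizeof(struct meter), &bytes))
                return SIZE_MAX;
        return bytes;
}

int main(void)
{
        struct meter *m = calloc(1, meter_size(4));

        if (!m)
                return 1;
        m->n_bands = 4;
        printf("allocated %zu bytes\n", meter_size(4));
        free(m);
        return 0;
}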
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 6a5dce8baf19..4a57fec6f306 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -18,6 +18,7 @@
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/idr.h>
+#include <linux/percpu.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
@@ -35,6 +36,7 @@ struct basic_filter {
struct tcf_result res;
struct tcf_proto *tp;
struct list_head link;
+ struct tc_basic_pcnt __percpu *pf;
struct rcu_work rwork;
};
@@ -46,8 +48,10 @@ static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct basic_filter *f;
list_for_each_entry_rcu(f, &head->flist, link) {
+ __this_cpu_inc(f->pf->rcnt);
if (!tcf_em_tree_match(skb, &f->ematches, NULL))
continue;
+ __this_cpu_inc(f->pf->rhit);
*res = f->res;
r = tcf_exts_exec(skb, &f->exts, res);
if (r < 0)
@@ -89,6 +93,7 @@ static void __basic_delete_filter(struct basic_filter *f)
tcf_exts_destroy(&f->exts);
tcf_em_tree_destroy(&f->ematches);
tcf_exts_put_net(&f->exts);
+ free_percpu(f->pf);
kfree(f);
}
@@ -208,6 +213,11 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
if (err)
goto errout;
fnew->handle = handle;
+ fnew->pf = alloc_percpu(struct tc_basic_pcnt);
+ if (!fnew->pf) {
+ err = -ENOMEM;
+ goto errout;
+ }
err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr,
extack);
@@ -231,6 +241,7 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
return 0;
errout:
+ free_percpu(fnew->pf);
tcf_exts_destroy(&fnew->exts);
kfree(fnew);
return err;
@@ -265,8 +276,10 @@ static void basic_bind_class(void *fh, u32 classid, unsigned long cl)
static int basic_dump(struct net *net, struct tcf_proto *tp, void *fh,
struct sk_buff *skb, struct tcmsg *t)
{
+ struct tc_basic_pcnt gpf = {};
struct basic_filter *f = fh;
struct nlattr *nest;
+ int cpu;
if (f == NULL)
return skb->len;
@@ -281,6 +294,18 @@ static int basic_dump(struct net *net, struct tcf_proto *tp, void *fh,
nla_put_u32(skb, TCA_BASIC_CLASSID, f->res.classid))
goto nla_put_failure;
+ for_each_possible_cpu(cpu) {
+ struct tc_basic_pcnt *pf = per_cpu_ptr(f->pf, cpu);
+
+ gpf.rcnt += pf->rcnt;
+ gpf.rhit += pf->rhit;
+ }
+
+ if (nla_put_64bit(skb, TCA_BASIC_PCNT,
+ sizeof(struct tc_basic_pcnt),
+ &gpf, TCA_BASIC_PAD))
+ goto nla_put_failure;
+
if (tcf_exts_dump(skb, &f->exts) < 0 ||
tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0)
goto nla_put_failure;
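[Editor's note] The per-CPU counters added to cls_basic keep the classify fast path free of shared-cache-line contention; the totals are only folded together when the filter is dumped. A compact user-space model of that pattern (NR_CPUS and the isolation guarantees of real per-CPU data are simplified away):

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

struct pcnt { uint64_t rcnt, rhit; };

static struct pcnt pf[NR_CPUS];         /* one slot per CPU */

static void classify_on(int cpu, int matched)
{
        pf[cpu].rcnt++;                 /* every packet seen by the filter */
        if (matched)
                pf[cpu].rhit++;         /* packets matching the ematch tree */
}

/* Fold the per-CPU slots into one total, as basic_dump() does. */
static struct pcnt dump(void)
{
        struct pcnt gpf = { 0 };

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                gpf.rcnt += pf[cpu].rcnt;
                gpf.rhit += pf[cpu].rhit;
        }
        return gpf;
}

int main(void)
{
        classify_on(0, 1);
        classify_on(1, 0);
        classify_on(1, 1);

        struct pcnt g = dump();
        printf("seen=%llu hit=%llu\n",
               (unsigned long long)g.rcnt, (unsigned long long)g.rhit);
        return 0;
}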
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 0e408ee9dcec..a1b803fd372e 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/percpu.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
@@ -22,6 +23,7 @@ struct cls_mall_head {
u32 handle;
u32 flags;
unsigned int in_hw_count;
+ struct tc_matchall_pcnt __percpu *pf;
struct rcu_work rwork;
};
@@ -34,6 +36,7 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
return -1;
*res = head->res;
+ __this_cpu_inc(head->pf->rhit);
return tcf_exts_exec(skb, &head->exts, res);
}
@@ -46,6 +49,7 @@ static void __mall_destroy(struct cls_mall_head *head)
{
tcf_exts_destroy(&head->exts);
tcf_exts_put_net(&head->exts);
+ free_percpu(head->pf);
kfree(head);
}
@@ -192,6 +196,11 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
handle = 1;
new->handle = handle;
new->flags = flags;
+ new->pf = alloc_percpu(struct tc_matchall_pcnt);
+ if (!new->pf) {
+ err = -ENOMEM;
+ goto err_alloc_percpu;
+ }
err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr,
extack);
@@ -214,6 +223,8 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
err_replace_hw_filter:
err_set_parms:
+ free_percpu(new->pf);
+err_alloc_percpu:
tcf_exts_destroy(&new->exts);
err_exts_init:
kfree(new);
@@ -270,8 +281,10 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
struct sk_buff *skb, struct tcmsg *t)
{
+ struct tc_matchall_pcnt gpf = {};
struct cls_mall_head *head = fh;
struct nlattr *nest;
+ int cpu;
if (!head)
return skb->len;
@@ -289,6 +302,17 @@ static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
goto nla_put_failure;
+ for_each_possible_cpu(cpu) {
+ struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);
+
+ gpf.rhit += pf->rhit;
+ }
+
+ if (nla_put_64bit(skb, TCA_MATCHALL_PCNT,
+ sizeof(struct tc_matchall_pcnt),
+ &gpf, TCA_MATCHALL_PAD))
+ goto nla_put_failure;
+
if (tcf_exts_dump(skb, &head->exts))
goto nla_put_failure;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 7e4d1ccf4c87..03e26e8d0ec9 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -758,8 +758,7 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
return 0;
}
-void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
- unsigned int len)
+void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
{
bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
const struct Qdisc_class_ops *cops;
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 5df9d1138ac9..cd78253de31d 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -556,10 +556,11 @@ EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
* Call all network notifier blocks.
*/
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
- struct switchdev_notifier_info *info)
+ struct switchdev_notifier_info *info,
+ struct netlink_ext_ack *extack)
{
info->dev = dev;
- info->extack = NULL;
+ info->extack = extack;
return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 2792a3cae682..ac306d17f8ad 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1126,7 +1126,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
skb_queue_tail(mc_inputq, skb);
return true;
}
- /* else: fall through */
+ /* fall through */
case CONN_MANAGER:
skb_queue_tail(inputq, skb);
return true;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 1217c90a363b..8fc5acd4820d 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -735,7 +735,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
case TIPC_CONNECTING:
if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
revents |= EPOLLOUT;
- /* fall thru' */
+ /* fall through */
case TIPC_LISTEN:
if (!skb_queue_empty(&sk->sk_receive_queue))
revents |= EPOLLIN | EPOLLRDNORM;
@@ -2416,7 +2416,7 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest,
* case is EINPROGRESS, rather than EALREADY.
*/
res = -EINPROGRESS;
- /* fall thru' */
+ /* fall through */
case TIPC_CONNECTING:
if (!timeout) {
if (previous == TIPC_CONNECTING)
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index a457c0fbbef1..4a708a4e8583 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -60,7 +60,6 @@
* @awork: accept work item
* @rcv_wq: receive workqueue
* @send_wq: send workqueue
- * @max_rcvbuf_size: maximum permitted receive message length
* @listener: topsrv listener socket
* @name: server name
*/
@@ -72,7 +71,6 @@ struct tipc_topsrv {
struct work_struct awork;
struct workqueue_struct *rcv_wq;
struct workqueue_struct *send_wq;
- int max_rcvbuf_size;
struct socket *listener;
char name[TIPC_SERVER_NAME_LEN];
};
@@ -648,7 +646,6 @@ int tipc_topsrv_start(struct net *net)
return -ENOMEM;
srv->net = net;
- srv->max_rcvbuf_size = sizeof(struct tipc_subscr);
INIT_WORK(&srv->awork, tipc_topsrv_accept);
strscpy(srv->name, name, sizeof(srv->name));
diff --git a/net/tipc/trace.c b/net/tipc/trace.c
index 964823841efe..265f6a26aa3d 100644
--- a/net/tipc/trace.c
+++ b/net/tipc/trace.c
@@ -111,7 +111,7 @@ int tipc_skb_dump(struct sk_buff *skb, bool more, char *buf)
break;
default:
break;
- };
+ }
i += scnprintf(buf + i, sz - i, " | %u",
msg_src_droppable(hdr));
i += scnprintf(buf + i, sz - i, " %u",
@@ -122,7 +122,7 @@ int tipc_skb_dump(struct sk_buff *skb, bool more, char *buf)
default:
/* need more? */
break;
- };
+ }
i += scnprintf(buf + i, sz - i, "\n");
if (!more)
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 78cb4a584080..d36d095cbcf0 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -61,6 +61,8 @@ static LIST_HEAD(device_list);
static DEFINE_SPINLOCK(device_spinlock);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;
+static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
+ struct proto *base);
static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
@@ -264,8 +266,10 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
lock_sock(sk);
sk_proto_close = ctx->sk_proto_close;
- if ((ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD) ||
- (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE)) {
+ if (ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD)
+ goto skip_tx_cleanup;
+
+ if (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE) {
free_ctx = true;
goto skip_tx_cleanup;
}
@@ -551,6 +555,43 @@ static struct tls_context *create_ctx(struct sock *sk)
return ctx;
}
+static void tls_build_proto(struct sock *sk)
+{
+ int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
+
+ /* Build IPv6 TLS whenever the address of tcpv6_prot changes */
+ if (ip_ver == TLSV6 &&
+ unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
+ mutex_lock(&tcpv6_prot_mutex);
+ if (likely(sk->sk_prot != saved_tcpv6_prot)) {
+ build_protos(tls_prots[TLSV6], sk->sk_prot);
+ smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
+ }
+ mutex_unlock(&tcpv6_prot_mutex);
+ }
+
+ if (ip_ver == TLSV4 &&
+ unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
+ mutex_lock(&tcpv4_prot_mutex);
+ if (likely(sk->sk_prot != saved_tcpv4_prot)) {
+ build_protos(tls_prots[TLSV4], sk->sk_prot);
+ smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
+ }
+ mutex_unlock(&tcpv4_prot_mutex);
+ }
+}
+
+static void tls_hw_sk_destruct(struct sock *sk)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ ctx->sk_destruct(sk);
+ /* Free ctx */
+ kfree(ctx);
+ icsk->icsk_ulp_data = NULL;
+}
+
static int tls_hw_prot(struct sock *sk)
{
struct tls_context *ctx;
@@ -564,12 +605,17 @@ static int tls_hw_prot(struct sock *sk)
if (!ctx)
goto out;
+ spin_unlock_bh(&device_spinlock);
+ tls_build_proto(sk);
ctx->hash = sk->sk_prot->hash;
ctx->unhash = sk->sk_prot->unhash;
ctx->sk_proto_close = sk->sk_prot->close;
+ ctx->sk_destruct = sk->sk_destruct;
+ sk->sk_destruct = tls_hw_sk_destruct;
ctx->rx_conf = TLS_HW_RECORD;
ctx->tx_conf = TLS_HW_RECORD;
update_sk_prot(sk, ctx);
+ spin_lock_bh(&device_spinlock);
rc = 1;
break;
}
@@ -668,7 +714,6 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
static int tls_init(struct sock *sk)
{
- int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
struct tls_context *ctx;
int rc = 0;
@@ -691,27 +736,7 @@ static int tls_init(struct sock *sk)
goto out;
}
- /* Build IPv6 TLS whenever the address of tcpv6_prot changes */
- if (ip_ver == TLSV6 &&
- unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
- mutex_lock(&tcpv6_prot_mutex);
- if (likely(sk->sk_prot != saved_tcpv6_prot)) {
- build_protos(tls_prots[TLSV6], sk->sk_prot);
- smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
- }
- mutex_unlock(&tcpv6_prot_mutex);
- }
-
- if (ip_ver == TLSV4 &&
- unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
- mutex_lock(&tcpv4_prot_mutex);
- if (likely(sk->sk_prot != saved_tcpv4_prot)) {
- build_protos(tls_prots[TLSV4], sk->sk_prot);
- smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
- }
- mutex_unlock(&tcpv4_prot_mutex);
- }
-
+ tls_build_proto(sk);
ctx->tx_conf = TLS_BASE;
ctx->rx_conf = TLS_BASE;
update_sk_prot(sk, ctx);
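[Editor's note] The tls_build_proto() helper factored out above is a double-checked rebuild: a lock-free acquire-load on the fast path, then a mutex plus release-store only when the base protocol pointer has changed. A hedged C11 sketch of the same ordering discipline, with illustrative names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic(const void *) saved_base;
static pthread_mutex_t rebuild_lock = PTHREAD_MUTEX_INITIALIZER;

static void rebuild_table(const void *base)
{
        printf("rebuilding derived table for base %p\n", base);
}

static void maybe_rebuild(const void *base)
{
        /* fast path: acquire-load pairs with the release-store below */
        if (atomic_load_explicit(&saved_base, memory_order_acquire) == base)
                return;

        pthread_mutex_lock(&rebuild_lock);
        if (atomic_load_explicit(&saved_base,
                                 memory_order_relaxed) != base) {
                rebuild_table(base);
                atomic_store_explicit(&saved_base, base,
                                      memory_order_release);
        }
        pthread_mutex_unlock(&rebuild_lock);
}

int main(void)
{
        int a, b;

        maybe_rebuild(&a);      /* rebuilds */
        maybe_rebuild(&a);      /* fast path, no rebuild */
        maybe_rebuild(&b);      /* base changed: rebuilds again */
        return 0;
}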
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index bf5b54b513bc..3f2a6af27e62 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -124,6 +124,7 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
struct aead_request *aead_req = (struct aead_request *)req;
struct scatterlist *sgout = aead_req->dst;
+ struct scatterlist *sgin = aead_req->src;
struct tls_sw_context_rx *ctx;
struct tls_context *tls_ctx;
struct scatterlist *sg;
@@ -134,12 +135,16 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
skb = (struct sk_buff *)req->data;
tls_ctx = tls_get_ctx(skb->sk);
ctx = tls_sw_ctx_rx(tls_ctx);
- pending = atomic_dec_return(&ctx->decrypt_pending);
/* Propagate if there was an err */
if (err) {
ctx->async_wait.err = err;
tls_err_abort(skb->sk, err);
+ } else {
+ struct strp_msg *rxm = strp_msg(skb);
+
+ rxm->offset += tls_ctx->rx.prepend_size;
+ rxm->full_len -= tls_ctx->rx.overhead_size;
}
/* After using skb->sk to propagate sk through crypto async callback
@@ -147,18 +152,21 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
*/
skb->sk = NULL;
- /* Release the skb, pages and memory allocated for crypto req */
- kfree_skb(skb);
- /* Skip the first S/G entry as it points to AAD */
- for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
- if (!sg)
- break;
- put_page(sg_page(sg));
+ /* Free the destination pages if skb was not decrypted in place */
+ if (sgout != sgin) {
+ /* Skip the first S/G entry as it points to AAD */
+ for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
+ if (!sg)
+ break;
+ put_page(sg_page(sg));
+ }
}
kfree(aead_req);
+ pending = atomic_dec_return(&ctx->decrypt_pending);
+
if (!pending && READ_ONCE(ctx->async_notify))
complete(&ctx->async_wait.completion);
}
@@ -1022,8 +1030,8 @@ send_end:
return copied ? copied : ret;
}
-int tls_sw_do_sendpage(struct sock *sk, struct page *page,
- int offset, size_t size, int flags)
+static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags)
{
long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
struct tls_context *tls_ctx = tls_get_ctx(sk);
@@ -1145,16 +1153,6 @@ sendpage_end:
return copied ? copied : ret;
}
-int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
- int offset, size_t size, int flags)
-{
- if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
- MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
- return -ENOTSUPP;
-
- return tls_sw_do_sendpage(sk, page, offset, size, flags);
-}
-
int tls_sw_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags)
{
@@ -1283,7 +1281,7 @@ out:
static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
struct iov_iter *out_iov,
struct scatterlist *out_sg,
- int *chunk, bool *zc)
+ int *chunk, bool *zc, bool async)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
@@ -1383,13 +1381,13 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
fallback_to_reg_recv:
sgout = sgin;
pages = 0;
- *chunk = 0;
+ *chunk = data_len;
*zc = false;
}
/* Prepare and submit AEAD request */
err = tls_do_decryption(sk, skb, sgin, sgout, iv,
- data_len, aead_req, *zc);
+ data_len, aead_req, async);
if (err == -EINPROGRESS)
return err;
@@ -1402,7 +1400,8 @@ fallback_to_reg_recv:
}
static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
- struct iov_iter *dest, int *chunk, bool *zc)
+ struct iov_iter *dest, int *chunk, bool *zc,
+ bool async)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
@@ -1415,7 +1414,7 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
return err;
#endif
if (!ctx->decrypted) {
- err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
+ err = decrypt_internal(sk, skb, dest, NULL, chunk, zc, async);
if (err < 0) {
if (err == -EINPROGRESS)
tls_advance_record_sn(sk, &tls_ctx->rx);
@@ -1441,7 +1440,7 @@ int decrypt_skb(struct sock *sk, struct sk_buff *skb,
bool zc = true;
int chunk;
- return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc);
+ return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
}
static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
@@ -1468,6 +1467,77 @@ static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
return true;
}
+/* This function traverses the rx_list in tls receive context to copy the
+ * decrypted data records into the buffer provided by the caller if zero copy
+ * is not true. Further, the records are removed from the rx_list if it is not
+ * a peek case and the record has been consumed completely.
+ */
+static int process_rx_list(struct tls_sw_context_rx *ctx,
+ struct msghdr *msg,
+ size_t skip,
+ size_t len,
+ bool zc,
+ bool is_peek)
+{
+ struct sk_buff *skb = skb_peek(&ctx->rx_list);
+ ssize_t copied = 0;
+
+ while (skip && skb) {
+ struct strp_msg *rxm = strp_msg(skb);
+
+ if (skip < rxm->full_len)
+ break;
+
+ skip = skip - rxm->full_len;
+ skb = skb_peek_next(skb, &ctx->rx_list);
+ }
+
+ while (len && skb) {
+ struct sk_buff *next_skb;
+ struct strp_msg *rxm = strp_msg(skb);
+ int chunk = min_t(unsigned int, rxm->full_len - skip, len);
+
+ if (!zc || (rxm->full_len - skip) > len) {
+ int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
+ msg, chunk);
+ if (err < 0)
+ return err;
+ }
+
+ len = len - chunk;
+ copied = copied + chunk;
+
+ /* Consume the data from the record in the non-peek case */
+ if (!is_peek) {
+ rxm->offset = rxm->offset + chunk;
+ rxm->full_len = rxm->full_len - chunk;
+
+ /* Return if there is unconsumed data in the record */
+ if (rxm->full_len - skip)
+ break;
+ }
+
+ /* The remaining skip-bytes must all lie in the first record in
+ * rx_list, so from the second record onwards 'skip' is 0.
+ */
+ skip = 0;
+
+ if (msg)
+ msg->msg_flags |= MSG_EOR;
+
+ next_skb = skb_peek_next(skb, &ctx->rx_list);
+
+ if (!is_peek) {
+ skb_unlink(skb, &ctx->rx_list);
+ kfree_skb(skb);
+ }
+
+ skb = next_skb;
+ }
+
+ return copied;
+}
+
int tls_sw_recvmsg(struct sock *sk,
struct msghdr *msg,
size_t len,
@@ -1478,7 +1548,8 @@ int tls_sw_recvmsg(struct sock *sk,
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct sk_psock *psock;
- unsigned char control;
+ unsigned char control = 0;
+ ssize_t decrypted = 0;
struct strp_msg *rxm;
struct sk_buff *skb;
ssize_t copied = 0;
@@ -1486,6 +1557,7 @@ int tls_sw_recvmsg(struct sock *sk,
int target, err = 0;
long timeo;
bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
+ bool is_peek = flags & MSG_PEEK;
int num_async = 0;
flags |= nonblock;
@@ -1496,11 +1568,28 @@ int tls_sw_recvmsg(struct sock *sk,
psock = sk_psock_get(sk);
lock_sock(sk);
- target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
- timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ /* Process pending decrypted records. It must be non-zero-copy */
+ err = process_rx_list(ctx, msg, 0, len, false, is_peek);
+ if (err < 0) {
+ tls_err_abort(sk, err);
+ goto end;
+ } else {
+ copied = err;
+ }
+
+ len = len - copied;
+ if (len) {
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ } else {
+ goto recv_end;
+ }
+
do {
- bool zc = false;
+ bool retain_skb = false;
bool async = false;
+ bool zc = false;
+ int to_decrypt;
int chunk = 0;
skb = tls_wait_data(sk, psock, flags, timeo, &err);
@@ -1510,7 +1599,7 @@ int tls_sw_recvmsg(struct sock *sk,
msg, len, flags);
if (ret > 0) {
- copied += ret;
+ decrypted += ret;
len -= ret;
continue;
}
@@ -1537,70 +1626,70 @@ int tls_sw_recvmsg(struct sock *sk,
goto recv_end;
}
- if (!ctx->decrypted) {
- int to_copy = rxm->full_len - tls_ctx->rx.overhead_size;
+ to_decrypt = rxm->full_len - tls_ctx->rx.overhead_size;
- if (!is_kvec && to_copy <= len &&
- likely(!(flags & MSG_PEEK)))
- zc = true;
+ if (to_decrypt <= len && !is_kvec && !is_peek)
+ zc = true;
- err = decrypt_skb_update(sk, skb, &msg->msg_iter,
- &chunk, &zc);
- if (err < 0 && err != -EINPROGRESS) {
- tls_err_abort(sk, EBADMSG);
- goto recv_end;
- }
-
- if (err == -EINPROGRESS) {
- async = true;
- num_async++;
- goto pick_next_record;
- }
-
- ctx->decrypted = true;
+ err = decrypt_skb_update(sk, skb, &msg->msg_iter,
+ &chunk, &zc, ctx->async_capable);
+ if (err < 0 && err != -EINPROGRESS) {
+ tls_err_abort(sk, EBADMSG);
+ goto recv_end;
}
- if (!zc) {
- chunk = min_t(unsigned int, rxm->full_len, len);
+ if (err == -EINPROGRESS) {
+ async = true;
+ num_async++;
+ goto pick_next_record;
+ } else {
+ if (!zc) {
+ if (rxm->full_len > len) {
+ retain_skb = true;
+ chunk = len;
+ } else {
+ chunk = rxm->full_len;
+ }
- err = skb_copy_datagram_msg(skb, rxm->offset, msg,
- chunk);
- if (err < 0)
- goto recv_end;
+ err = skb_copy_datagram_msg(skb, rxm->offset,
+ msg, chunk);
+ if (err < 0)
+ goto recv_end;
+
+ if (!is_peek) {
+ rxm->offset = rxm->offset + chunk;
+ rxm->full_len = rxm->full_len - chunk;
+ }
+ }
}
pick_next_record:
- copied += chunk;
+ if (chunk > len)
+ chunk = len;
+
+ decrypted += chunk;
len -= chunk;
- if (likely(!(flags & MSG_PEEK))) {
- u8 control = ctx->control;
-
- /* For async, drop current skb reference */
- if (async)
- skb = NULL;
-
- if (tls_sw_advance_skb(sk, skb, chunk)) {
- /* Return full control message to
- * userspace before trying to parse
- * another message type
- */
- msg->msg_flags |= MSG_EOR;
- if (control != TLS_RECORD_TYPE_DATA)
- goto recv_end;
- } else {
- break;
- }
- } else {
- /* MSG_PEEK right now cannot look beyond current skb
- * from strparser, meaning we cannot advance skb here
- * and thus unpause strparser since we'd loose original
- * one.
+
+ /* For async or peek case, queue the current skb */
+ if (async || is_peek || retain_skb) {
+ skb_queue_tail(&ctx->rx_list, skb);
+ skb = NULL;
+ }
+
+ if (tls_sw_advance_skb(sk, skb, chunk)) {
+ /* Return full control message to
+ * userspace before trying to parse
+ * another message type
*/
+ msg->msg_flags |= MSG_EOR;
+ if (ctx->control != TLS_RECORD_TYPE_DATA)
+ goto recv_end;
+ } else {
break;
}
/* If we have a new message from strparser, continue now. */
- if (copied >= target && !ctx->recv_pkt)
+ if (decrypted >= target && !ctx->recv_pkt)
break;
} while (len);
@@ -1614,13 +1703,33 @@ recv_end:
/* one of async decrypt failed */
tls_err_abort(sk, err);
copied = 0;
+ decrypted = 0;
+ goto end;
}
} else {
reinit_completion(&ctx->async_wait.completion);
}
WRITE_ONCE(ctx->async_notify, false);
+
+ /* Drain records from the rx_list & copy if required */
+ if (is_peek || is_kvec)
+ err = process_rx_list(ctx, msg, copied,
+ decrypted, false, is_peek);
+ else
+ err = process_rx_list(ctx, msg, 0,
+ decrypted, true, is_peek);
+ if (err < 0) {
+ tls_err_abort(sk, err);
+ copied = 0;
+ goto end;
+ }
+
+ WARN_ON(decrypted != err);
}
+ copied += decrypted;
+
+end:
release_sock(sk);
if (psock)
sk_psock_put(sk, psock);
@@ -1657,7 +1766,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
}
if (!ctx->decrypted) {
- err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc);
+ err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
if (err < 0) {
tls_err_abort(sk, EBADMSG);
@@ -1846,6 +1955,7 @@ void tls_sw_release_resources_rx(struct sock *sk)
if (ctx->aead_recv) {
kfree_skb(ctx->recv_pkt);
ctx->recv_pkt = NULL;
+ skb_queue_purge(&ctx->rx_list);
crypto_free_aead(ctx->aead_recv);
strp_stop(&ctx->strp);
write_lock_bh(&sk->sk_callback_lock);
@@ -1895,6 +2005,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
struct crypto_aead **aead;
struct strp_callbacks cb;
u16 nonce_size, tag_size, iv_size, rec_seq_size;
+ struct crypto_tfm *tfm;
char *iv, *rec_seq;
int rc = 0;
@@ -1941,6 +2052,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
crypto_init_wait(&sw_ctx_rx->async_wait);
crypto_info = &ctx->crypto_recv.info;
cctx = &ctx->rx;
+ skb_queue_head_init(&sw_ctx_rx->rx_list);
aead = &sw_ctx_rx->aead_recv;
}
@@ -2008,6 +2120,10 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
goto free_aead;
if (sw_ctx_rx) {
+ tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
+ sw_ctx_rx->async_capable =
+ tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
+
/* Set up strparser */
memset(&cb, 0, sizeof(cb));
cb.rcv_msg = tls_queue;
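[Editor's note] process_rx_list() above walks a queue of already-decrypted records, honouring a skip offset and peek semantics, and unlinks records once fully consumed. The following user-space sketch models that drain logic on a plain linked list; the types and the exact consume rules are simplified assumptions, not the kernel code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rec {
        struct rec *next;
        size_t      off, len;   /* unread window within the record */
        char        data[32];
};

static size_t drain(struct rec **head, char *out, size_t skip,
                    size_t len, int is_peek)
{
        struct rec **pp = head;
        size_t copied = 0;

        /* the skip offset can only span whole leading records */
        while (*pp && skip >= (*pp)->len) {
                skip -= (*pp)->len;
                pp = &(*pp)->next;
        }

        while (*pp && len) {
                struct rec *r = *pp;
                size_t chunk = r->len - skip;

                if (chunk > len)
                        chunk = len;
                memcpy(out + copied, r->data + r->off + skip, chunk);
                copied += chunk;
                len -= chunk;

                if (!is_peek) {
                        r->off += skip + chunk;
                        r->len -= skip + chunk;
                        if (r->len)     /* record not fully consumed */
                                break;
                        *pp = r->next;  /* unlink and free the record */
                        free(r);
                } else {
                        pp = &r->next;
                }
                skip = 0;       /* only the first record carries a skip */
        }
        return copied;
}

int main(void)
{
        struct rec *a = calloc(1, sizeof(*a));
        struct rec *b = calloc(1, sizeof(*b));
        char out[16] = "";

        strcpy(a->data, "hello "); a->len = 6; a->next = b;
        strcpy(b->data, "world");  b->len = 5;

        size_t n = drain(&a, out, 0, sizeof(out) - 1, 0);
        printf("%zu: %s\n", n, out);
        return 0;
}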
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 43a1dec08825..a60df252d3cc 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -505,7 +505,7 @@ out:
static int __vsock_bind_stream(struct vsock_sock *vsk,
struct sockaddr_vm *addr)
{
- static u32 port = 0;
+ static u32 port;
struct sockaddr_vm new_addr;
if (!port)
diff --git a/net/xdp/Kconfig b/net/xdp/Kconfig
index 90e4a7152854..0255b33cff4b 100644
--- a/net/xdp/Kconfig
+++ b/net/xdp/Kconfig
@@ -5,3 +5,11 @@ config XDP_SOCKETS
help
XDP sockets allows a channel between XDP programs and
userspace applications.
+
+config XDP_SOCKETS_DIAG
+ tristate "XDP sockets: monitoring interface"
+ depends on XDP_SOCKETS
+ default n
+ help
+ Support for PF_XDP sockets monitoring interface used by the ss tool.
+ If unsure, say Y.
diff --git a/net/xdp/Makefile b/net/xdp/Makefile
index 04f073146256..59dbfdf93dca 100644
--- a/net/xdp/Makefile
+++ b/net/xdp/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_XDP_SOCKETS) += xsk.o xdp_umem.o xsk_queue.o
+obj-$(CONFIG_XDP_SOCKETS_DIAG) += xsk_diag.o
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index d4de871e7d4d..5ab236c5c9a5 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -13,12 +13,15 @@
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
+#include <linux/idr.h>
#include "xdp_umem.h"
#include "xsk_queue.h"
#define XDP_UMEM_MIN_CHUNK_SIZE 2048
+static DEFINE_IDA(umem_ida);
+
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
unsigned long flags;
@@ -67,6 +70,7 @@ struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
return NULL;
}
+EXPORT_SYMBOL(xdp_get_umem_from_qid);
static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
{
@@ -193,6 +197,8 @@ static void xdp_umem_release(struct xdp_umem *umem)
xdp_umem_clear_dev(umem);
+ ida_simple_remove(&umem_ida, umem->id);
+
if (umem->fq) {
xskq_destroy(umem->fq);
umem->fq = NULL;
@@ -399,8 +405,16 @@ struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
if (!umem)
return ERR_PTR(-ENOMEM);
+ err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
+ if (err < 0) {
+ kfree(umem);
+ return ERR_PTR(err);
+ }
+ umem->id = err;
+
err = xdp_umem_reg(umem, mr);
if (err) {
+ ida_simple_remove(&umem_ida, umem->id);
kfree(umem);
return ERR_PTR(err);
}
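[Editor's note] Each umem now carries a small integer id allocated from an IDA at create time, with every error path and the final teardown releasing it again so ids can be reused. A toy analogue with a bitmap allocator, showing the reverse-order unwind (all names are illustrative):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_IDS 64
static uint64_t id_map;                 /* bit i set => id i in use */

static int id_alloc(void)
{
        for (int i = 0; i < MAX_IDS; i++) {
                if (!(id_map & (1ULL << i))) {
                        id_map |= 1ULL << i;
                        return i;       /* lowest free id */
                }
        }
        return -ENOSPC;
}

static void id_free(int id)
{
        id_map &= ~(1ULL << id);
}

struct umem { int id; };

static struct umem *umem_create(int fail_registration)
{
        struct umem *u = calloc(1, sizeof(*u));

        if (!u)
                return NULL;
        u->id = id_alloc();
        if (u->id < 0)
                goto err_free;
        if (fail_registration)  /* mirrors the xdp_umem_reg() failure */
                goto err_put_id;
        return u;

err_put_id:
        id_free(u->id);         /* unwind in reverse order */
err_free:
        free(u);
        return NULL;
}

static void umem_destroy(struct umem *u)
{
        if (!u)
                return;
        id_free(u->id);         /* like ida_simple_remove() on release */
        free(u);
}

int main(void)
{
        struct umem *u = umem_create(0);

        printf("got id %d\n", u ? u->id : -1);
        umem_create(1);         /* failure path releases its id */
        struct umem *v = umem_create(0);
        printf("got id %d\n", v ? v->id : -1);
        umem_destroy(u);
        umem_destroy(v);
        return 0;
}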
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index a03268454a27..949d3bbccb2f 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -27,14 +27,10 @@
#include "xsk_queue.h"
#include "xdp_umem.h"
+#include "xsk.h"
#define TX_BATCH_SIZE 16
-static struct xdp_sock *xdp_sk(struct sock *sk)
-{
- return (struct xdp_sock *)sk;
-}
-
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
@@ -350,6 +346,10 @@ static int xsk_release(struct socket *sock)
net = sock_net(sk);
+ mutex_lock(&net->xdp.lock);
+ sk_del_node_init_rcu(sk);
+ mutex_unlock(&net->xdp.lock);
+
local_bh_disable();
sock_prot_inuse_add(net, sk->sk_prot, -1);
local_bh_enable();
@@ -746,6 +746,10 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
mutex_init(&xs->mutex);
spin_lock_init(&xs->tx_completion_lock);
+ mutex_lock(&net->xdp.lock);
+ sk_add_node_rcu(sk, &net->xdp.list);
+ mutex_unlock(&net->xdp.lock);
+
local_bh_disable();
sock_prot_inuse_add(net, &xsk_proto, 1);
local_bh_enable();
@@ -759,6 +763,23 @@ static const struct net_proto_family xsk_family_ops = {
.owner = THIS_MODULE,
};
+static int __net_init xsk_net_init(struct net *net)
+{
+ mutex_init(&net->xdp.lock);
+ INIT_HLIST_HEAD(&net->xdp.list);
+ return 0;
+}
+
+static void __net_exit xsk_net_exit(struct net *net)
+{
+ WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
+}
+
+static struct pernet_operations xsk_net_ops = {
+ .init = xsk_net_init,
+ .exit = xsk_net_exit,
+};
+
static int __init xsk_init(void)
{
int err;
@@ -771,8 +792,13 @@ static int __init xsk_init(void)
if (err)
goto out_proto;
+ err = register_pernet_subsys(&xsk_net_ops);
+ if (err)
+ goto out_sk;
return 0;
+out_sk:
+ sock_unregister(PF_XDP);
out_proto:
proto_unregister(&xsk_proto);
out:
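[Editor's note] Registering pernet_operations gives the subsystem init/exit hooks that run for every network namespace, which is what lets the diag module below find all sockets in a namespace. A minimal sketch of that callback shape, modelled with plain structs and direct calls rather than the kernel's registration machinery:

#include <stdio.h>

struct ns_state { int ready; };

struct pernet_ops {
        int  (*init)(struct ns_state *ns);
        void (*exit)(struct ns_state *ns);
};

static int xsk_ns_init(struct ns_state *ns)
{
        ns->ready = 1;          /* e.g. mutex_init + INIT_HLIST_HEAD */
        printf("namespace state initialised\n");
        return 0;
}

static void xsk_ns_exit(struct ns_state *ns)
{
        ns->ready = 0;          /* e.g. warn if sockets remain listed */
        printf("namespace state torn down\n");
}

static const struct pernet_ops xsk_ops = {
        .init = xsk_ns_init,
        .exit = xsk_ns_exit,
};

int main(void)
{
        struct ns_state ns = { 0 };

        if (xsk_ops.init(&ns))  /* called for each new namespace */
                return 1;
        xsk_ops.exit(&ns);      /* called when a namespace dies */
        return 0;
}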
diff --git a/net/xdp/xsk.h b/net/xdp/xsk.h
new file mode 100644
index 000000000000..ba8120610426
--- /dev/null
+++ b/net/xdp/xsk.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2019 Intel Corporation. */
+
+#ifndef XSK_H_
+#define XSK_H_
+
+static inline struct xdp_sock *xdp_sk(struct sock *sk)
+{
+ return (struct xdp_sock *)sk;
+}
+
+#endif /* XSK_H_ */
diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c
new file mode 100644
index 000000000000..661d007c3b28
--- /dev/null
+++ b/net/xdp/xsk_diag.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/* XDP sockets monitoring support
+ *
+ * Copyright(c) 2019 Intel Corporation.
+ *
+ * Author: Björn Töpel <bjorn.topel@intel.com>
+ */
+
+#include <linux/module.h>
+#include <net/xdp_sock.h>
+#include <linux/xdp_diag.h>
+#include <linux/sock_diag.h>
+
+#include "xsk_queue.h"
+#include "xsk.h"
+
+static int xsk_diag_put_info(const struct xdp_sock *xs, struct sk_buff *nlskb)
+{
+ struct xdp_diag_info di = {};
+
+ di.ifindex = xs->dev ? xs->dev->ifindex : 0;
+ di.queue_id = xs->queue_id;
+ return nla_put(nlskb, XDP_DIAG_INFO, sizeof(di), &di);
+}
+
+static int xsk_diag_put_ring(const struct xsk_queue *queue, int nl_type,
+ struct sk_buff *nlskb)
+{
+ struct xdp_diag_ring dr = {};
+
+ dr.entries = queue->nentries;
+ return nla_put(nlskb, nl_type, sizeof(dr), &dr);
+}
+
+static int xsk_diag_put_rings_cfg(const struct xdp_sock *xs,
+ struct sk_buff *nlskb)
+{
+ int err = 0;
+
+ if (xs->rx)
+ err = xsk_diag_put_ring(xs->rx, XDP_DIAG_RX_RING, nlskb);
+ if (!err && xs->tx)
+ err = xsk_diag_put_ring(xs->tx, XDP_DIAG_TX_RING, nlskb);
+ return err;
+}
+
+static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
+{
+ struct xdp_umem *umem = xs->umem;
+ struct xdp_diag_umem du = {};
+ int err;
+
+ if (!umem)
+ return 0;
+
+ du.id = umem->id;
+ du.size = umem->size;
+ du.num_pages = umem->npgs;
+ du.chunk_size = (__u32)(~umem->chunk_mask + 1);
+ du.headroom = umem->headroom;
+ du.ifindex = umem->dev ? umem->dev->ifindex : 0;
+ du.queue_id = umem->queue_id;
+ du.flags = 0;
+ if (umem->zc)
+ du.flags |= XDP_DU_F_ZEROCOPY;
+ du.refs = refcount_read(&umem->users);
+
+ err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du);
+
+ if (!err && umem->fq)
+ err = xsk_diag_put_ring(umem->fq, XDP_DIAG_UMEM_FILL_RING, nlskb);
+ if (!err && umem->cq) {
+ err = xsk_diag_put_ring(umem->cq, XDP_DIAG_UMEM_COMPLETION_RING,
+ nlskb);
+ }
+ return err;
+}
+
+static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
+ struct xdp_diag_req *req,
+ struct user_namespace *user_ns,
+ u32 portid, u32 seq, u32 flags, int sk_ino)
+{
+ struct xdp_sock *xs = xdp_sk(sk);
+ struct xdp_diag_msg *msg;
+ struct nlmsghdr *nlh;
+
+ nlh = nlmsg_put(nlskb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*msg),
+ flags);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ msg = nlmsg_data(nlh);
+ memset(msg, 0, sizeof(*msg));
+ msg->xdiag_family = AF_XDP;
+ msg->xdiag_type = sk->sk_type;
+ msg->xdiag_ino = sk_ino;
+ sock_diag_save_cookie(sk, msg->xdiag_cookie);
+
+ if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb))
+ goto out_nlmsg_trim;
+
+ if ((req->xdiag_show & XDP_SHOW_INFO) &&
+ nla_put_u32(nlskb, XDP_DIAG_UID,
+ from_kuid_munged(user_ns, sock_i_uid(sk))))
+ goto out_nlmsg_trim;
+
+ if ((req->xdiag_show & XDP_SHOW_RING_CFG) &&
+ xsk_diag_put_rings_cfg(xs, nlskb))
+ goto out_nlmsg_trim;
+
+ if ((req->xdiag_show & XDP_SHOW_UMEM) &&
+ xsk_diag_put_umem(xs, nlskb))
+ goto out_nlmsg_trim;
+
+ if ((req->xdiag_show & XDP_SHOW_MEMINFO) &&
+ sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO))
+ goto out_nlmsg_trim;
+
+ nlmsg_end(nlskb, nlh);
+ return 0;
+
+out_nlmsg_trim:
+ nlmsg_cancel(nlskb, nlh);
+ return -EMSGSIZE;
+}
+
+static int xsk_diag_dump(struct sk_buff *nlskb, struct netlink_callback *cb)
+{
+ struct xdp_diag_req *req = nlmsg_data(cb->nlh);
+ struct net *net = sock_net(nlskb->sk);
+ int num = 0, s_num = cb->args[0];
+ struct sock *sk;
+
+ mutex_lock(&net->xdp.lock);
+
+ sk_for_each(sk, &net->xdp.list) {
+ if (!net_eq(sock_net(sk), net))
+ continue;
+ if (num++ < s_num)
+ continue;
+
+ if (xsk_diag_fill(sk, nlskb, req,
+ sk_user_ns(NETLINK_CB(cb->skb).sk),
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ sock_i_ino(sk)) < 0) {
+ num--;
+ break;
+ }
+ }
+
+ mutex_unlock(&net->xdp.lock);
+ cb->args[0] = num;
+ return nlskb->len;
+}
+
+static int xsk_diag_handler_dump(struct sk_buff *nlskb, struct nlmsghdr *hdr)
+{
+ struct netlink_dump_control c = { .dump = xsk_diag_dump };
+ int hdrlen = sizeof(struct xdp_diag_req);
+ struct net *net = sock_net(nlskb->sk);
+
+ if (nlmsg_len(hdr) < hdrlen)
+ return -EINVAL;
+
+ if (!(hdr->nlmsg_flags & NLM_F_DUMP))
+ return -EOPNOTSUPP;
+
+ return netlink_dump_start(net->diag_nlsk, nlskb, hdr, &c);
+}
+
+static const struct sock_diag_handler xsk_diag_handler = {
+ .family = AF_XDP,
+ .dump = xsk_diag_handler_dump,
+};
+
+static int __init xsk_diag_init(void)
+{
+ return sock_diag_register(&xsk_diag_handler);
+}
+
+static void __exit xsk_diag_exit(void)
+{
+ sock_diag_unregister(&xsk_diag_handler);
+}
+
+module_init(xsk_diag_init);
+module_exit(xsk_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, AF_XDP);
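[Editor's note] xsk_diag_dump() above resumes an interrupted dump by stashing the number of sockets already emitted in cb->args[0] and skipping that many entries on the next invocation. A self-contained model of that cursor logic (the budget and entry counts are made up for the demo):

#include <stdio.h>

#define NENTRIES 5

static int fill(int entry, int budget_left)
{
        if (!budget_left)
                return -1;      /* reply buffer full: resume later */
        printf("dumped entry %d\n", entry);
        return 0;
}

static int dump(long *cursor, int budget)
{
        int num = 0;

        for (int i = 0; i < NENTRIES; i++) {
                if (num++ < *cursor)    /* sent in a prior pass */
                        continue;
                if (fill(i, budget--) < 0) {
                        num--;          /* this entry was not emitted */
                        break;
                }
        }
        *cursor = num;
        return num >= NENTRIES;         /* nonzero once all were sent */
}

int main(void)
{
        long cursor = 0;                /* plays the role of cb->args[0] */

        while (!dump(&cursor, 2))       /* two entries fit per "skb" */
                printf("-- resume --\n");
        return 0;
}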
diff --git a/samples/bpf/bpf_insn.h b/samples/bpf/bpf_insn.h
index 20dc5cefec84..544237980582 100644
--- a/samples/bpf/bpf_insn.h
+++ b/samples/bpf/bpf_insn.h
@@ -164,6 +164,16 @@ struct bpf_insn;
.off = OFF, \
.imm = 0 })
+/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_REG(OP, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
@@ -174,6 +184,16 @@ struct bpf_insn;
.off = OFF, \
.imm = IMM })
+/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = OFF, \
+ .imm = IMM })
+
/* Raw code statement block */
#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
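[Editor's note] The new BPF_JMP32_* macros emit the BPF_JMP32 instruction class, so only the low 32 bits of the operands take part in the comparison. A standalone sketch using local stand-ins for the UAPI constants; the values mirror the kernel headers but are copied here so the snippet compiles on its own:

#include <stdint.h>
#include <stdio.h>

struct bpf_insn {
        uint8_t  code;
        uint8_t  dst_reg:4;
        uint8_t  src_reg:4;
        int16_t  off;
        int32_t  imm;
};

#define BPF_JMP32   0x06        /* 32-bit jump instruction class */
#define BPF_K       0x00        /* immediate operand */
#define BPF_JEQ     0x10
#define BPF_OP(op)  ((op) & 0xf0)

#define BPF_JMP32_IMM(OP, DST, IMM, OFF)                \
        ((struct bpf_insn) {                            \
                .code    = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
                .dst_reg = DST,                         \
                .src_reg = 0,                           \
                .off     = OFF,                         \
                .imm     = IMM })

int main(void)
{
        /* if ((u32)r1 == 7) goto pc + 2; unlike BPF_JMP_IMM, the
         * upper 32 bits of r1 are ignored by the comparison */
        struct bpf_insn insn = BPF_JMP32_IMM(BPF_JEQ, 1, 7, 2);

        printf("code=0x%02x dst=r%d imm=%d off=%d\n",
               insn.code, insn.dst_reg, insn.imm, insn.off);
        return 0;
}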
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
index d07ccf8a23f7..d43fce568ef7 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -142,5 +142,6 @@ SEE ALSO
**bpftool**\ (8),
**bpftool-prog**\ (8),
**bpftool-map**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-net**\ (8),
**bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
new file mode 100644
index 000000000000..8d489a26e3c9
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
@@ -0,0 +1,85 @@
+===============
+bpftool-feature
+===============
+-------------------------------------------------------------------------------
+tool for inspection of eBPF-related parameters for Linux kernel or net device
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **feature** *COMMAND*
+
+ *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] }
+
+ *COMMANDS* := { **probe** | **help** }
+
+FEATURE COMMANDS
+================
+
+| **bpftool** **feature probe** [*COMPONENT*] [**macros** [**prefix** *PREFIX*]]
+| **bpftool** **feature help**
+|
+| *COMPONENT* := { **kernel** | **dev** *NAME* }
+
+DESCRIPTION
+===========
+ **bpftool feature probe** [**kernel**] [**macros** [**prefix** *PREFIX*]]
+ Probe the running kernel and dump a number of eBPF-related
+ parameters, such as availability of the **bpf()** system call,
+ JIT status, eBPF program types availability, eBPF helper
+ functions availability, and more.
+
+ If the **macros** keyword (but not the **-j** option) is
+ passed, a subset of the output is dumped as a list of
+ **#define** macros that are ready to be included in a C
+ header file, for example. If, additionally, **prefix** is
+ used to define a *PREFIX*, the provided string will be used
+ as a prefix to the names of the macros: this can be used to
+ avoid conflicts on macro names when including the output of
+ this command as a header file.
+
+ Keyword **kernel** can be omitted. If no probe target is
+ specified, probing the kernel is the default behaviour.
+
+ Note that when probed, some eBPF helpers (e.g.
+ **bpf_trace_printk**\ () or **bpf_probe_write_user**\ ()) may
+ print warnings to kernel logs.
+
+ **bpftool feature probe dev** *NAME* [**macros** [**prefix** *PREFIX*]]
+ Probe network device for supported eBPF features and dump
+ results to the console.
+
+ The two keywords **macros** and **prefix** have the same
+ role as when probing the kernel.
+
+ **bpftool feature help**
+ Print short help message.
+
+OPTIONS
+=======
+ -h, --help
+ Print short generic help message (similar to **bpftool help**).
+
+ -v, --version
+ Print version number (similar to **bpftool version**).
+
+ -j, --json
+ Generate JSON output. For commands that cannot produce JSON, this
+ option has no effect.
+
+ -p, --pretty
+ Generate human-readable JSON output. Implies **-j**.
+
+SEE ALSO
+========
+ **bpf**\ (2),
+ **bpf-helpers**\ (7),
+ **bpftool**\ (8),
+ **bpftool-prog**\ (8),
+ **bpftool-map**\ (8),
+ **bpftool-cgroup**\ (8),
+ **bpftool-net**\ (8),
+ **bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index 64b001b4f777..5c984ffc9f01 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -25,12 +25,17 @@ MAP COMMANDS
| **bpftool** **map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* \
| **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*]
| **bpftool** **map dump** *MAP*
-| **bpftool** **map update** *MAP* **key** *DATA* **value** *VALUE* [*UPDATE_FLAGS*]
-| **bpftool** **map lookup** *MAP* **key** *DATA*
+| **bpftool** **map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*]
+| **bpftool** **map lookup** *MAP* [**key** *DATA*]
| **bpftool** **map getnext** *MAP* [**key** *DATA*]
| **bpftool** **map delete** *MAP* **key** *DATA*
| **bpftool** **map pin** *MAP* *FILE*
| **bpftool** **map event_pipe** *MAP* [**cpu** *N* **index** *M*]
+| **bpftool** **map peek** *MAP*
+| **bpftool** **map push** *MAP* **value** *VALUE*
+| **bpftool** **map pop** *MAP*
+| **bpftool** **map enqueue** *MAP* **value** *VALUE*
+| **bpftool** **map dequeue** *MAP*
| **bpftool** **map help**
|
| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
@@ -62,7 +67,7 @@ DESCRIPTION
**bpftool map dump** *MAP*
Dump all entries in a given *MAP*.
- **bpftool map update** *MAP* **key** *DATA* **value** *VALUE* [*UPDATE_FLAGS*]
+ **bpftool map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*]
Update map entry for a given *KEY*.
*UPDATE_FLAGS* can be one of: **any** update existing entry
@@ -75,7 +80,7 @@ DESCRIPTION
the bytes are parsed as decimal values, unless a "0x" prefix
(for hexadecimal) or a "0" prefix (for octal) is provided.
- **bpftool map lookup** *MAP* **key** *DATA*
+ **bpftool map lookup** *MAP* [**key** *DATA*]
Lookup **key** in the map.
**bpftool map getnext** *MAP* [**key** *DATA*]
@@ -107,6 +112,21 @@ DESCRIPTION
replace any existing ring. Any other application will stop
receiving events if it installed its rings earlier.
+ **bpftool map peek** *MAP*
+ Peek next **value** in the queue or stack.
+
+ **bpftool map push** *MAP* **value** *VALUE*
+ Push **value** onto the stack.
+
+ **bpftool map pop** *MAP*
+ Pop and print **value** from the stack.
+
+ **bpftool map enqueue** *MAP* **value** *VALUE*
+ Enqueue **value** into the queue.
+
+ **bpftool map dequeue** *MAP*
+ Dequeue and print **value** from the queue.
+
**bpftool map help**
Print short help message.
@@ -236,5 +256,6 @@ SEE ALSO
**bpftool**\ (8),
**bpftool-prog**\ (8),
**bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-net**\ (8),
**bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst
index ed87c9b619ad..779dab3650ee 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-net.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst
@@ -142,4 +142,5 @@ SEE ALSO
**bpftool-prog**\ (8),
**bpftool-map**\ (8),
**bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-perf.rst b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
index f4c5e5538bb8..bca5590a80d0 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-perf.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
@@ -84,4 +84,5 @@ SEE ALSO
**bpftool-prog**\ (8),
**bpftool-map**\ (8),
**bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-net**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 58c8369b77dd..13b56102f528 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -258,5 +258,6 @@ SEE ALSO
**bpftool**\ (8),
**bpftool-map**\ (8),
**bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-net**\ (8),
**bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index e1677e81ed59..27153bb816ac 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -72,5 +72,6 @@ SEE ALSO
**bpftool-prog**\ (8),
**bpftool-map**\ (8),
**bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-net**\ (8),
**bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index e4e4fab1b8c7..763dd12482aa 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -50,14 +50,15 @@ _bpftool_get_map_ids()
command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) )
}
-_bpftool_get_perf_map_ids()
+# Takes map type and adds matching map ids to the list of suggestions.
+_bpftool_get_map_ids_for_type()
{
+ local type="$1"
COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
- command grep -C2 perf_event_array | \
+ command grep -C2 "$type" | \
command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) )
}
-
_bpftool_get_prog_ids()
{
COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
@@ -99,15 +100,25 @@ _sysfs_get_netdevs()
"$cur" ) )
}
-# For bpftool map update: retrieve type of the map to update.
-_bpftool_map_update_map_type()
+# Retrieve type of the map that we are operating on.
+_bpftool_map_guess_map_type()
{
local keyword ref
for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
- if [[ ${words[$((idx-2))]} == "update" ]]; then
- keyword=${words[$((idx-1))]}
- ref=${words[$((idx))]}
- fi
+ case "${words[$((idx-2))]}" in
+ lookup|update)
+ keyword=${words[$((idx-1))]}
+ ref=${words[$((idx))]}
+ ;;
+ push)
+ printf "stack"
+ return 0
+ ;;
+ enqueue)
+ printf "queue"
+ return 0
+ ;;
+ esac
done
[[ -z $ref ]] && return 0
@@ -119,6 +130,8 @@ _bpftool_map_update_map_type()
_bpftool_map_update_get_id()
{
+ local command="$1"
+
# Is it the map to update, or a map to insert into the map to update?
# Search for "value" keyword.
local idx value
@@ -128,11 +141,24 @@ _bpftool_map_update_get_id()
break
fi
done
- [[ $value -eq 0 ]] && _bpftool_get_map_ids && return 0
+ if [[ $value -eq 0 ]]; then
+ case "$command" in
+ push)
+ _bpftool_get_map_ids_for_type stack
+ ;;
+ enqueue)
+ _bpftool_get_map_ids_for_type queue
+ ;;
+ *)
+ _bpftool_get_map_ids
+ ;;
+ esac
+ return 0
+ fi
# Id to complete is for a value. It can be either prog id or map id. This
# depends on the type of the map to update.
- local type=$(_bpftool_map_update_map_type)
+ local type=$(_bpftool_map_guess_map_type)
case $type in
array_of_maps|hash_of_maps)
_bpftool_get_map_ids
@@ -382,14 +408,28 @@ _bpftool()
map)
local MAP_TYPE='id pinned'
case $command in
- show|list|dump)
+ show|list|dump|peek|pop|dequeue)
case $prev in
$command)
COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
return 0
;;
id)
- _bpftool_get_map_ids
+ case "$command" in
+ peek)
+ _bpftool_get_map_ids_for_type stack
+ _bpftool_get_map_ids_for_type queue
+ ;;
+ pop)
+ _bpftool_get_map_ids_for_type stack
+ ;;
+ dequeue)
+ _bpftool_get_map_ids_for_type queue
+ ;;
+ *)
+ _bpftool_get_map_ids
+ ;;
+ esac
return 0
;;
*)
@@ -447,19 +487,25 @@ _bpftool()
COMPREPLY+=( $( compgen -W 'hex' -- "$cur" ) )
;;
*)
+ case $(_bpftool_map_guess_map_type) in
+ queue|stack)
+ return 0
+ ;;
+ esac
+
_bpftool_once_attr 'key'
return 0
;;
esac
;;
- update)
+ update|push|enqueue)
case $prev in
$command)
COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
return 0
;;
id)
- _bpftool_map_update_get_id
+ _bpftool_map_update_get_id $command
return 0
;;
key)
@@ -468,7 +514,7 @@ _bpftool()
value)
# We can have bytes, or references to a prog or a
# map, depending on the type of the map to update.
- case $(_bpftool_map_update_map_type) in
+ case "$(_bpftool_map_guess_map_type)" in
array_of_maps|hash_of_maps)
local MAP_TYPE='id pinned'
COMPREPLY+=( $( compgen -W "$MAP_TYPE" \
@@ -490,6 +536,13 @@ _bpftool()
return 0
;;
*)
+ case $(_bpftool_map_guess_map_type) in
+ queue|stack)
+ _bpftool_once_attr 'value'
+ return 0;
+ ;;
+ esac
+
_bpftool_once_attr 'key'
local UPDATE_FLAGS='any exist noexist'
for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
@@ -508,6 +561,7 @@ _bpftool()
return 0
fi
done
+
return 0
;;
esac
@@ -527,7 +581,7 @@ _bpftool()
return 0
;;
id)
- _bpftool_get_perf_map_ids
+ _bpftool_get_map_ids_for_type perf_event_array
return 0
;;
cpu)
@@ -546,7 +600,8 @@ _bpftool()
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'delete dump getnext help \
- lookup pin event_pipe show list update create' -- \
+ lookup pin event_pipe show list update create \
+ peek push enqueue pop dequeue' -- \
"$cur" ) )
;;
esac
@@ -624,6 +679,25 @@ _bpftool()
;;
esac
;;
+ feature)
+ case $command in
+ probe)
+ [[ $prev == "dev" ]] && _sysfs_get_netdevs && return 0
+ [[ $prev == "prefix" ]] && return 0
+ if _bpftool_search_list 'macros'; then
+ COMPREPLY+=( $( compgen -W 'prefix' -- "$cur" ) )
+ else
+ COMPREPLY+=( $( compgen -W 'macros' -- "$cur" ) )
+ fi
+ _bpftool_one_of_list 'kernel dev'
+ return 0
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'help probe' -- "$cur" ) )
+ ;;
+ esac
+ ;;
esac
} &&
complete -F _bpftool bpftool
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index 6ba5f567a9d8..e63bce0755eb 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -73,35 +73,104 @@ static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id,
return ret;
}
+static void btf_int128_print(json_writer_t *jw, const void *data,
+ bool is_plain_text)
+{
+ /* data points to a __int128 number.
+ * Suppose
+ * int128_num = *(__int128 *)data;
+ * The formulas below show what upper_num and lower_num represent:
+ * upper_num = int128_num >> 64;
+ * lower_num = int128_num & 0xffffffffFFFFFFFFULL;
+ */
+ __u64 upper_num, lower_num;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ upper_num = *(__u64 *)data;
+ lower_num = *(__u64 *)(data + 8);
+#else
+ upper_num = *(__u64 *)(data + 8);
+ lower_num = *(__u64 *)data;
+#endif
+
+ if (is_plain_text) {
+ if (upper_num == 0)
+ jsonw_printf(jw, "0x%llx", lower_num);
+ else
+ jsonw_printf(jw, "0x%llx%016llx", upper_num, lower_num);
+ } else {
+ if (upper_num == 0)
+ jsonw_printf(jw, "\"0x%llx\"", lower_num);
+ else
+ jsonw_printf(jw, "\"0x%llx%016llx\"", upper_num, lower_num);
+ }
+}
+
+static void btf_int128_shift(__u64 *print_num, u16 left_shift_bits,
+ u16 right_shift_bits)
+{
+ __u64 upper_num, lower_num;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ upper_num = print_num[0];
+ lower_num = print_num[1];
+#else
+ upper_num = print_num[1];
+ lower_num = print_num[0];
+#endif
+
+ /* shake out unneeded bits by shift/or operations */
+ if (left_shift_bits >= 64) {
+ upper_num = lower_num << (left_shift_bits - 64);
+ lower_num = 0;
+ } else {
+ upper_num = (upper_num << left_shift_bits) |
+ (lower_num >> (64 - left_shift_bits));
+ lower_num = lower_num << left_shift_bits;
+ }
+
+ if (right_shift_bits >= 64) {
+ lower_num = upper_num >> (right_shift_bits - 64);
+ upper_num = 0;
+ } else {
+ lower_num = (lower_num >> right_shift_bits) |
+ (upper_num << (64 - right_shift_bits));
+ upper_num = upper_num >> right_shift_bits;
+ }
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ print_num[0] = upper_num;
+ print_num[1] = lower_num;
+#else
+ print_num[0] = lower_num;
+ print_num[1] = upper_num;
+#endif
+}
+
static void btf_dumper_bitfield(__u32 nr_bits, __u8 bit_offset,
const void *data, json_writer_t *jw,
bool is_plain_text)
{
int left_shift_bits, right_shift_bits;
+ __u64 print_num[2] = {};
int bytes_to_copy;
int bits_to_copy;
- __u64 print_num;
bits_to_copy = bit_offset + nr_bits;
bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy);
- print_num = 0;
- memcpy(&print_num, data, bytes_to_copy);
+ memcpy(print_num, data, bytes_to_copy);
#if defined(__BIG_ENDIAN_BITFIELD)
left_shift_bits = bit_offset;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- left_shift_bits = 64 - bits_to_copy;
+ left_shift_bits = 128 - bits_to_copy;
#else
#error neither big nor little endian
#endif
- right_shift_bits = 64 - nr_bits;
+ right_shift_bits = 128 - nr_bits;
- print_num <<= left_shift_bits;
- print_num >>= right_shift_bits;
- if (is_plain_text)
- jsonw_printf(jw, "0x%llx", print_num);
- else
- jsonw_printf(jw, "%llu", print_num);
+ btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
+ btf_int128_print(jw, print_num, is_plain_text);
}
@@ -113,7 +182,7 @@ static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
int total_bits_offset;
/* bits_offset is at most 7.
- * BTF_INT_OFFSET() cannot exceed 64 bits.
+ * BTF_INT_OFFSET() cannot exceed 128 bits.
*/
total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type);
data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
@@ -139,6 +208,11 @@ static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
return 0;
}
+ if (nr_bits == 128) {
+ btf_int128_print(jw, data, is_plain_text);
+ return 0;
+ }
+
switch (BTF_INT_ENCODING(*int_type)) {
case 0:
if (BTF_INT_BITS(*int_type) == 64)
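[Editor's note] The bitfield path now shifts across a pair of 64-bit words so that fields of __int128 members survive intact. A hedged little-endian user-space model of that two-word shift, with explicit guards for shift counts of 0 and 64 or more, since plain C leaves a 64-bit shift by 64 undefined:

#include <stdint.h>
#include <stdio.h>

static void shl128(uint64_t w[2], unsigned n)   /* w[0] is the low word */
{
        if (n >= 64) {
                w[1] = n == 128 ? 0 : w[0] << (n - 64);
                w[0] = 0;
        } else if (n) {
                w[1] = (w[1] << n) | (w[0] >> (64 - n));
                w[0] <<= n;
        }
}

static void shr128(uint64_t w[2], unsigned n)
{
        if (n >= 64) {
                w[0] = n == 128 ? 0 : w[1] >> (n - 64);
                w[1] = 0;
        } else if (n) {
                w[0] = (w[0] >> n) | (w[1] << (64 - n));
                w[1] >>= n;
        }
}

int main(void)
{
        /* extract an 8-bit field at bit offset 68 of a 128-bit value:
         * bits 68..83 of the value below hold 0xabcd */
        uint64_t w[2] = { 0, 0xabcdULL << 4 };
        unsigned bit_offset = 68, nr_bits = 8;

        shl128(w, 128 - (bit_offset + nr_bits)); /* drop bits above it */
        shr128(w, 128 - nr_bits);                /* drop bits below it */
        printf("field = 0x%llx\n", (unsigned long long)w[0]);
        return 0;
}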
diff --git a/tools/bpf/bpftool/cfg.c b/tools/bpf/bpftool/cfg.c
index 31f0db41513f..3e21f994f262 100644
--- a/tools/bpf/bpftool/cfg.c
+++ b/tools/bpf/bpftool/cfg.c
@@ -157,6 +157,11 @@ static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
return false;
}
+static bool is_jmp_insn(u8 code)
+{
+ return BPF_CLASS(code) == BPF_JMP || BPF_CLASS(code) == BPF_JMP32;
+}
+
static bool func_partition_bb_head(struct func_node *func)
{
struct bpf_insn *cur, *end;
@@ -170,7 +175,7 @@ static bool func_partition_bb_head(struct func_node *func)
return true;
for (; cur <= end; cur++) {
- if (BPF_CLASS(cur->code) == BPF_JMP) {
+ if (is_jmp_insn(cur->code)) {
u8 opcode = BPF_OP(cur->code);
if (opcode == BPF_EXIT || opcode == BPF_CALL)
@@ -296,7 +301,7 @@ static bool func_add_bb_edges(struct func_node *func)
e->src = bb;
insn = bb->tail;
- if (BPF_CLASS(insn->code) != BPF_JMP ||
+ if (!is_jmp_insn(insn->code) ||
BPF_OP(insn->code) == BPF_EXIT) {
e->dst = bb_next(bb);
e->flags |= EDGE_FLAG_FALLTHROUGH;
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
new file mode 100644
index 000000000000..d672d9086fff
--- /dev/null
+++ b/tools/bpf/bpftool/feature.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (c) 2019 Netronome Systems, Inc. */
+
+#include <ctype.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <net/if.h>
+#include <sys/utsname.h>
+#include <sys/vfs.h>
+
+#include <linux/filter.h>
+#include <linux/limits.h>
+
+#include <bpf.h>
+#include <libbpf.h>
+
+#include "main.h"
+
+#ifndef PROC_SUPER_MAGIC
+# define PROC_SUPER_MAGIC 0x9fa0
+#endif
+
+enum probe_component {
+ COMPONENT_UNSPEC,
+ COMPONENT_KERNEL,
+ COMPONENT_DEVICE,
+};
+
+#define BPF_HELPER_MAKE_ENTRY(name) [BPF_FUNC_ ## name] = "bpf_" # name
+static const char * const helper_name[] = {
+ __BPF_FUNC_MAPPER(BPF_HELPER_MAKE_ENTRY)
+};
+
+#undef BPF_HELPER_MAKE_ENTRY
+
+/* Miscellaneous utility functions */
+
+static bool check_procfs(void)
+{
+ struct statfs st_fs;
+
+ if (statfs("/proc", &st_fs) < 0)
+ return false;
+ if ((unsigned long)st_fs.f_type != PROC_SUPER_MAGIC)
+ return false;
+
+ return true;
+}
+
+static void uppercase(char *str, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len && str[i] != '\0'; i++)
+ str[i] = toupper(str[i]);
+}
+
+/* Printing utility functions */
+
+static void
+print_bool_feature(const char *feat_name, const char *plain_name,
+ const char *define_name, bool res, const char *define_prefix)
+{
+ if (json_output)
+ jsonw_bool_field(json_wtr, feat_name, res);
+ else if (define_prefix)
+ printf("#define %s%sHAVE_%s\n", define_prefix,
+ res ? "" : "NO_", define_name);
+ else
+ printf("%s is %savailable\n", plain_name, res ? "" : "NOT ");
+}
+
+static void print_kernel_option(const char *name, const char *value)
+{
+ char *endptr;
+ int res;
+
+ /* No support for C-style output */
+
+ if (json_output) {
+ if (!value) {
+ jsonw_null_field(json_wtr, name);
+ return;
+ }
+ errno = 0;
+ res = strtol(value, &endptr, 0);
+ if (!errno && *endptr == '\n')
+ jsonw_int_field(json_wtr, name, res);
+ else
+ jsonw_string_field(json_wtr, name, value);
+ } else {
+ if (value)
+ printf("%s is set to %s\n", name, value);
+ else
+ printf("%s is not set\n", name);
+ }
+}
+
+static void
+print_start_section(const char *json_title, const char *plain_title,
+ const char *define_comment, const char *define_prefix)
+{
+ if (json_output) {
+ jsonw_name(json_wtr, json_title);
+ jsonw_start_object(json_wtr);
+ } else if (define_prefix) {
+ printf("%s\n", define_comment);
+ } else {
+ printf("%s\n", plain_title);
+ }
+}
+
+static void
+print_end_then_start_section(const char *json_title, const char *plain_title,
+ const char *define_comment,
+ const char *define_prefix)
+{
+ if (json_output)
+ jsonw_end_object(json_wtr);
+ else
+ printf("\n");
+
+ print_start_section(json_title, plain_title, define_comment,
+ define_prefix);
+}
+
+/* Probing functions */
+
+static int read_procfs(const char *path)
+{
+ char *endptr, *line = NULL;
+ size_t len = 0;
+ FILE *fd;
+ int res;
+
+ fd = fopen(path, "r");
+ if (!fd)
+ return -1;
+
+ res = getline(&line, &len, fd);
+ fclose(fd);
+ if (res < 0)
+ return -1;
+
+ errno = 0;
+ res = strtol(line, &endptr, 10);
+ if (errno || *line == '\0' || *endptr != '\n')
+ res = -1;
+ free(line);
+
+ return res;
+}
+
+static void probe_unprivileged_disabled(void)
+{
+ int res;
+
+ /* No support for C-style output */
+
+ res = read_procfs("/proc/sys/kernel/unprivileged_bpf_disabled");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "unprivileged_bpf_disabled", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("bpf() syscall for unprivileged users is enabled\n");
+ break;
+ case 1:
+ printf("bpf() syscall restricted to privileged users\n");
+ break;
+ case -1:
+ printf("Unable to retrieve required privileges for bpf() syscall\n");
+ break;
+ default:
+ printf("bpf() syscall restriction has unknown value %d\n", res);
+ }
+ }
+}
+
+static void probe_jit_enable(void)
+{
+ int res;
+
+ /* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_enable");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_enable", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("JIT compiler is disabled\n");
+ break;
+ case 1:
+ printf("JIT compiler is enabled\n");
+ break;
+ case 2:
+ printf("JIT compiler is enabled with debugging traces in kernel logs\n");
+ break;
+ case -1:
+ printf("Unable to retrieve JIT-compiler status\n");
+ break;
+ default:
+ printf("JIT-compiler status has unknown value %d\n",
+ res);
+ }
+ }
+}
+
+static void probe_jit_harden(void)
+{
+ int res;
+
+ /* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_harden");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_harden", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("JIT compiler hardening is disabled\n");
+ break;
+ case 1:
+ printf("JIT compiler hardening is enabled for unprivileged users\n");
+ break;
+ case 2:
+ printf("JIT compiler hardening is enabled for all users\n");
+ break;
+ case -1:
+ printf("Unable to retrieve JIT hardening status\n");
+ break;
+ default:
+ printf("JIT hardening status has unknown value %d\n",
+ res);
+ }
+ }
+}
+
+static void probe_jit_kallsyms(void)
+{
+ int res;
+
+ /* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_kallsyms");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_kallsyms", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("JIT compiler kallsyms exports are disabled\n");
+ break;
+ case 1:
+ printf("JIT compiler kallsyms exports are enabled for root\n");
+ break;
+ case -1:
+ printf("Unable to retrieve JIT kallsyms export status\n");
+ break;
+ default:
+ printf("JIT kallsyms exports status has unknown value %d\n", res);
+ }
+ }
+}
+
+static void probe_jit_limit(void)
+{
+ int res;
+
+ /* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_limit");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_limit", res);
+ } else {
+ switch (res) {
+ case -1:
+ printf("Unable to retrieve global memory limit for JIT compiler for unprivileged users\n");
+ break;
+ default:
+ printf("Global memory limit for JIT compiler for unprivileged users is %d bytes\n", res);
+ }
+ }
+}
+
+static char *get_kernel_config_option(FILE *fd, const char *option)
+{
+ size_t line_n = 0, optlen = strlen(option);
+ char *res, *strval, *line = NULL;
+ ssize_t n;
+
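+ /* Rescan the config file from the top for each option lookup */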
+ rewind(fd);
+ while ((n = getline(&line, &line_n, fd)) > 0) {
+ if (strncmp(line, option, optlen))
+ continue;
+ /* Check we have at least '=', value, and '\n' */
+ if (strlen(line) < optlen + 3)
+ continue;
+ if (*(line + optlen) != '=')
+ continue;
+
+ /* Trim ending '\n' */
+ line[strlen(line) - 1] = '\0';
+
+ /* Copy and return config option value */
+ strval = line + optlen + 1;
+ res = strdup(strval);
+ free(line);
+ return res;
+ }
+ free(line);
+
+ return NULL;
+}
+
+static void probe_kernel_image_config(void)
+{
+ static const char * const options[] = {
+ /* Enable BPF */
+ "CONFIG_BPF",
+ /* Enable bpf() syscall */
+ "CONFIG_BPF_SYSCALL",
+ /* Does selected architecture support eBPF JIT compiler */
+ "CONFIG_HAVE_EBPF_JIT",
+ /* Compile eBPF JIT compiler */
+ "CONFIG_BPF_JIT",
+ /* Avoid compiling eBPF interpreter (use JIT only) */
+ "CONFIG_BPF_JIT_ALWAYS_ON",
+
+ /* cgroups */
+ "CONFIG_CGROUPS",
+ /* BPF programs attached to cgroups */
+ "CONFIG_CGROUP_BPF",
+ /* bpf_get_cgroup_classid() helper */
+ "CONFIG_CGROUP_NET_CLASSID",
+ /* bpf_skb_{,ancestor_}cgroup_id() helpers */
+ "CONFIG_SOCK_CGROUP_DATA",
+
+ /* Tracing: attach BPF to kprobes, tracepoints, etc. */
+ "CONFIG_BPF_EVENTS",
+ /* Kprobes */
+ "CONFIG_KPROBE_EVENTS",
+ /* Uprobes */
+ "CONFIG_UPROBE_EVENTS",
+ /* Tracepoints */
+ "CONFIG_TRACING",
+ /* Syscall tracepoints */
+ "CONFIG_FTRACE_SYSCALLS",
+ /* bpf_override_return() helper support for selected arch */
+ "CONFIG_FUNCTION_ERROR_INJECTION",
+ /* bpf_override_return() helper */
+ "CONFIG_BPF_KPROBE_OVERRIDE",
+
+ /* Network */
+ "CONFIG_NET",
+ /* AF_XDP sockets */
+ "CONFIG_XDP_SOCKETS",
+ /* BPF_PROG_TYPE_LWT_* and related helpers */
+ "CONFIG_LWTUNNEL_BPF",
+ /* BPF_PROG_TYPE_SCHED_ACT, TC (traffic control) actions */
+ "CONFIG_NET_ACT_BPF",
+ /* BPF_PROG_TYPE_SCHED_CLS, TC filters */
+ "CONFIG_NET_CLS_BPF",
+ /* TC clsact qdisc */
+ "CONFIG_NET_CLS_ACT",
+ /* Ingress filtering with TC */
+ "CONFIG_NET_SCH_INGRESS",
+ /* bpf_skb_get_xfrm_state() helper */
+ "CONFIG_XFRM",
+ /* bpf_get_route_realm() helper */
+ "CONFIG_IP_ROUTE_CLASSID",
+ /* BPF_PROG_TYPE_LWT_SEG6_LOCAL and related helpers */
+ "CONFIG_IPV6_SEG6_BPF",
+ /* BPF_PROG_TYPE_LIRC_MODE2 and related helpers */
+ "CONFIG_BPF_LIRC_MODE2",
+ /* BPF stream parser and BPF socket maps */
+ "CONFIG_BPF_STREAM_PARSER",
+ /* xt_bpf module for passing BPF programs to netfilter */
+ "CONFIG_NETFILTER_XT_MATCH_BPF",
+ /* bpfilter back-end for iptables */
+ "CONFIG_BPFILTER",
+ /* bpfilter module with "user mode helper" */
+ "CONFIG_BPFILTER_UMH",
+
+ /* test_bpf module for BPF tests */
+ "CONFIG_TEST_BPF",
+ };
+ char *value, *buf = NULL;
+ struct utsname utsn;
+ char path[PATH_MAX];
+ size_t i, n = 0;
+ ssize_t ret;
+ FILE *fd;
+
+ if (uname(&utsn))
+ goto no_config;
+
+ snprintf(path, sizeof(path), "/boot/config-%s", utsn.release);
+
+ fd = fopen(path, "r");
+ if (!fd && errno == ENOENT) {
+ /* Some distributions put the config file at /proc/config, so give
+ * it a try. Sometimes it is also at /proc/config.gz, but we do not
+ * try that one for now, as it would require linking against libz.
+ */
+ fd = fopen("/proc/config", "r");
+ }
+ if (!fd) {
+ p_info("skipping kernel config, can't open file: %s",
+ strerror(errno));
+ goto no_config;
+ }
+ /* Sanity checks */
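+ /* A generated kernel config starts with a bare "#" line followed by
+ * "# Automatically generated file; DO NOT EDIT.", hence the two reads:
+ * buf must hold the second line for the check below.
+ */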
+ ret = getline(&buf, &n, fd);
+ ret = getline(&buf, &n, fd);
+ if (!buf || ret < 0) {
+ p_info("skipping kernel config, can't read from file: %s",
+ strerror(errno));
+ free(buf);
+ goto no_config;
+ }
+ if (strcmp(buf, "# Automatically generated file; DO NOT EDIT.\n")) {
+ p_info("skipping kernel config, can't find correct file");
+ free(buf);
+ goto no_config;
+ }
+ free(buf);
+
+ for (i = 0; i < ARRAY_SIZE(options); i++) {
+ value = get_kernel_config_option(fd, options[i]);
+ print_kernel_option(options[i], value);
+ free(value);
+ }
+ fclose(fd);
+ return;
+
+no_config:
+ for (i = 0; i < ARRAY_SIZE(options); i++)
+ print_kernel_option(options[i], NULL);
+}
+
+static bool probe_bpf_syscall(const char *define_prefix)
+{
+ bool res;
+
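+ /* Probing with all-zero arguments is expected to fail: any errno
+ * other than ENOSYS (e.g. EINVAL) proves that the syscall exists
+ */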
+ bpf_load_program(BPF_PROG_TYPE_UNSPEC, NULL, 0, NULL, 0, NULL, 0);
+ res = (errno != ENOSYS);
+
+ print_bool_feature("have_bpf_syscall",
+ "bpf() syscall",
+ "BPF_SYSCALL",
+ res, define_prefix);
+
+ return res;
+}
+
+static void
+probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
+ const char *define_prefix, __u32 ifindex)
+{
+ char feat_name[128], plain_desc[128], define_name[128];
+ const char *plain_comment = "eBPF program_type ";
+ size_t maxlen;
+ bool res;
+
+ if (ifindex)
+ /* Only test offload-able program types */
+ switch (prog_type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_XDP:
+ break;
+ default:
+ return;
+ }
+
+ res = bpf_probe_prog_type(prog_type, ifindex);
+
+ supported_types[prog_type] |= res;
+
+ maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
+ if (strlen(prog_type_name[prog_type]) > maxlen) {
+ p_info("program type name too long");
+ return;
+ }
+
+ sprintf(feat_name, "have_%s_prog_type", prog_type_name[prog_type]);
+ sprintf(define_name, "%s_prog_type", prog_type_name[prog_type]);
+ uppercase(define_name, sizeof(define_name));
+ sprintf(plain_desc, "%s%s", plain_comment, prog_type_name[prog_type]);
+ print_bool_feature(feat_name, plain_desc, define_name, res,
+ define_prefix);
+}
+
+static void
+probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
+ __u32 ifindex)
+{
+ char feat_name[128], plain_desc[128], define_name[128];
+ const char *plain_comment = "eBPF map_type ";
+ size_t maxlen;
+ bool res;
+
+ res = bpf_probe_map_type(map_type, ifindex);
+
+ maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
+ if (strlen(map_type_name[map_type]) > maxlen) {
+ p_info("map type name too long");
+ return;
+ }
+
+ sprintf(feat_name, "have_%s_map_type", map_type_name[map_type]);
+ sprintf(define_name, "%s_map_type", map_type_name[map_type]);
+ uppercase(define_name, sizeof(define_name));
+ sprintf(plain_desc, "%s%s", plain_comment, map_type_name[map_type]);
+ print_bool_feature(feat_name, plain_desc, define_name, res,
+ define_prefix);
+}
+
+static void
+probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
+ const char *define_prefix, __u32 ifindex)
+{
+ const char *ptype_name = prog_type_name[prog_type];
+ char feat_name[128];
+ unsigned int id;
+ bool res;
+
+ if (ifindex)
+ /* Only test helpers for offload-able program types */
+ switch (prog_type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_XDP:
+ break;
+ default:
+ return;
+ }
+
+ if (json_output) {
+ sprintf(feat_name, "%s_available_helpers", ptype_name);
+ jsonw_name(json_wtr, feat_name);
+ jsonw_start_array(json_wtr);
+ } else if (!define_prefix) {
+ printf("eBPF helpers supported for program type %s:",
+ ptype_name);
+ }
+
+ for (id = 1; id < ARRAY_SIZE(helper_name); id++) {
+ if (!supported_type)
+ res = false;
+ else
+ res = bpf_probe_helper(id, prog_type, ifindex);
+
+ if (json_output) {
+ if (res)
+ jsonw_string(json_wtr, helper_name[id]);
+ } else if (define_prefix) {
+ printf("#define %sBPF__PROG_TYPE_%s__HELPER_%s %s\n",
+ define_prefix, ptype_name, helper_name[id],
+ res ? "1" : "0");
+ } else {
+ if (res)
+ printf("\n\t- %s", helper_name[id]);
+ }
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr);
+ else if (!define_prefix)
+ printf("\n");
+}
+
+static int do_probe(int argc, char **argv)
+{
+ enum probe_component target = COMPONENT_UNSPEC;
+ const char *define_prefix = NULL;
+ bool supported_types[128] = {};
+ __u32 ifindex = 0;
+ unsigned int i;
+ char *ifname;
+
+ /* Detection assumes the user has sufficient privileges
+ * (CAP_SYS_ADMIN), which we approximate by restricting usage to the
+ * root user.
+ */
+ if (geteuid()) {
+ p_err("please run this command as root user");
+ return -1;
+ }
+
+ set_max_rlimit();
+
+ while (argc) {
+ if (is_prefix(*argv, "kernel")) {
+ if (target != COMPONENT_UNSPEC) {
+ p_err("component to probe already specified");
+ return -1;
+ }
+ target = COMPONENT_KERNEL;
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "dev")) {
+ NEXT_ARG();
+
+ if (target != COMPONENT_UNSPEC || ifindex) {
+ p_err("component to probe already specified");
+ return -1;
+ }
+ if (!REQ_ARGS(1))
+ return -1;
+
+ target = COMPONENT_DEVICE;
+ ifname = GET_ARG();
+ ifindex = if_nametoindex(ifname);
+ if (!ifindex) {
+ p_err("unrecognized netdevice '%s': %s", ifname,
+ strerror(errno));
+ return -1;
+ }
+ } else if (is_prefix(*argv, "macros") && !define_prefix) {
+ define_prefix = "";
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "prefix")) {
+ if (!define_prefix) {
+ p_err("'prefix' argument can only be use after 'macros'");
+ return -1;
+ }
+ if (strcmp(define_prefix, "")) {
+ p_err("'prefix' already defined");
+ return -1;
+ }
+ NEXT_ARG();
+
+ if (!REQ_ARGS(1))
+ return -1;
+ define_prefix = GET_ARG();
+ } else {
+ p_err("expected no more arguments, 'kernel', 'dev', 'macros' or 'prefix', got: '%s'?",
+ *argv);
+ return -1;
+ }
+ }
+
+ if (json_output) {
+ define_prefix = NULL;
+ jsonw_start_object(json_wtr);
+ }
+
+ switch (target) {
+ case COMPONENT_KERNEL:
+ case COMPONENT_UNSPEC:
+ if (define_prefix)
+ break;
+
+ print_start_section("system_config",
+ "Scanning system configuration...",
+ NULL, /* define_comment never used here */
+ NULL); /* define_prefix always NULL here */
+ if (check_procfs()) {
+ probe_unprivileged_disabled();
+ probe_jit_enable();
+ probe_jit_harden();
+ probe_jit_kallsyms();
+ probe_jit_limit();
+ } else {
+ p_info("/* procfs not mounted, skipping related probes */");
+ }
+ probe_kernel_image_config();
+ if (json_output)
+ jsonw_end_object(json_wtr);
+ else
+ printf("\n");
+ break;
+ default:
+ break;
+ }
+
+ print_start_section("syscall_config",
+ "Scanning system call availability...",
+ "/*** System call availability ***/",
+ define_prefix);
+
+ if (!probe_bpf_syscall(define_prefix))
+ /* bpf() syscall unavailable, don't probe other BPF features */
+ goto exit_close_json;
+
+ print_end_then_start_section("program_types",
+ "Scanning eBPF program types...",
+ "/*** eBPF program types ***/",
+ define_prefix);
+
+ for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
+ probe_prog_type(i, supported_types, define_prefix, ifindex);
+
+ print_end_then_start_section("map_types",
+ "Scanning eBPF map types...",
+ "/*** eBPF map types ***/",
+ define_prefix);
+
+ for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++)
+ probe_map_type(i, define_prefix, ifindex);
+
+ print_end_then_start_section("helpers",
+ "Scanning eBPF helper functions...",
+ "/*** eBPF helper functions ***/",
+ define_prefix);
+
+ if (define_prefix)
+ printf("/*\n"
+ " * Use %sHAVE_PROG_TYPE_HELPER(prog_type_name, helper_name)\n"
+ " * to determine if <helper_name> is available for <prog_type_name>,\n"
+ " * e.g.\n"
+ " * #if %sHAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)\n"
+ " * // do stuff with this helper\n"
+ " * #elif\n"
+ " * // use a workaround\n"
+ " * #endif\n"
+ " */\n"
+ "#define %sHAVE_PROG_TYPE_HELPER(prog_type, helper) \\\n"
+ " %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n",
+ define_prefix, define_prefix, define_prefix,
+ define_prefix);
+ for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
+ probe_helpers_for_progtype(i, supported_types[i],
+ define_prefix, ifindex);
+
+exit_close_json:
+ if (json_output) {
+ /* End current "section" of probes */
+ jsonw_end_object(json_wtr);
+ /* End root object */
+ jsonw_end_object(json_wtr);
+ }
+
+ return 0;
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %s %s probe [COMPONENT] [macros [prefix PREFIX]]\n"
+ " %s %s help\n"
+ "\n"
+ " COMPONENT := { kernel | dev NAME }\n"
+ "",
+ bin_name, argv[-2], bin_name, argv[-2]);
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "probe", do_probe },
+ { "help", do_help },
+ { 0 }
+};
+
+int do_feature(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index f44a1c2c4ea0..a9d5e9e6a732 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -56,7 +56,7 @@ static int do_help(int argc, char **argv)
" %s batch file FILE\n"
" %s version\n"
"\n"
- " OBJECT := { prog | map | cgroup | perf | net }\n"
+ " OBJECT := { prog | map | cgroup | perf | net | feature }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, bin_name, bin_name);
@@ -187,6 +187,7 @@ static const struct cmd cmds[] = {
{ "cgroup", do_cgroup },
{ "perf", do_perf },
{ "net", do_net },
+ { "feature", do_feature },
{ "version", do_version },
{ 0 }
};
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 052c91d4dc55..d7dd84d3c660 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -75,6 +75,9 @@ static const char * const prog_type_name[] = {
[BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
};
+extern const char * const map_type_name[];
+extern const size_t map_type_name_size;
+
enum bpf_obj_type {
BPF_OBJ_UNKNOWN,
BPF_OBJ_PROG,
@@ -145,6 +148,7 @@ int do_cgroup(int argc, char **arg);
int do_perf(int argc, char **arg);
int do_net(int argc, char **arg);
int do_tracelog(int argc, char **arg);
+int do_feature(int argc, char **argv);
int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what);
int prog_parse_fd(int *argc, char ***argv);
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index 2037e3dc864b..2160a8ef17e5 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -21,7 +21,7 @@
#include "json_writer.h"
#include "main.h"
-static const char * const map_type_name[] = {
+const char * const map_type_name[] = {
[BPF_MAP_TYPE_UNSPEC] = "unspec",
[BPF_MAP_TYPE_HASH] = "hash",
[BPF_MAP_TYPE_ARRAY] = "array",
@@ -48,6 +48,8 @@ static const char * const map_type_name[] = {
[BPF_MAP_TYPE_STACK] = "stack",
};
+const size_t map_type_name_size = ARRAY_SIZE(map_type_name);
+
static bool map_is_per_cpu(__u32 type)
{
return type == BPF_MAP_TYPE_PERCPU_HASH ||
@@ -285,16 +287,21 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
single_line = info->key_size + info->value_size <= 24 &&
!break_names;
- printf("key:%c", break_names ? '\n' : ' ');
- fprint_hex(stdout, key, info->key_size, " ");
+ if (info->key_size) {
+ printf("key:%c", break_names ? '\n' : ' ');
+ fprint_hex(stdout, key, info->key_size, " ");
- printf(single_line ? " " : "\n");
+ printf(single_line ? " " : "\n");
+ }
- printf("value:%c", break_names ? '\n' : ' ');
- if (value)
- fprint_hex(stdout, value, info->value_size, " ");
- else
- printf("<no entry>");
+ if (info->value_size) {
+ printf("value:%c", break_names ? '\n' : ' ');
+ if (value)
+ fprint_hex(stdout, value, info->value_size,
+ " ");
+ else
+ printf("<no entry>");
+ }
printf("\n");
} else {
@@ -303,19 +310,23 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
n = get_possible_cpus();
step = round_up(info->value_size, 8);
- printf("key:\n");
- fprint_hex(stdout, key, info->key_size, " ");
- printf("\n");
- for (i = 0; i < n; i++) {
- printf("value (CPU %02d):%c",
- i, info->value_size > 16 ? '\n' : ' ');
- if (value)
- fprint_hex(stdout, value + i * step,
- info->value_size, " ");
- else
- printf("<no entry>");
+ if (info->key_size) {
+ printf("key:\n");
+ fprint_hex(stdout, key, info->key_size, " ");
printf("\n");
}
+ if (info->value_size) {
+ for (i = 0; i < n; i++) {
+ printf("value (CPU %02d):%c",
+ i, info->value_size > 16 ? '\n' : ' ');
+ if (value)
+ fprint_hex(stdout, value + i * step,
+ info->value_size, " ");
+ else
+ printf("<no entry>");
+ printf("\n");
+ }
+ }
}
}
@@ -415,6 +426,9 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
p_err("not enough value arguments for map of progs");
return -1;
}
+ if (is_prefix(*argv, "id"))
+ p_info("Warning: updating program array via MAP_ID, make sure this map is kept open\n"
+ " by some process or pinned otherwise update will be lost");
fd = prog_parse_fd(&argc, &argv);
if (fd < 0)
@@ -779,6 +793,32 @@ exit_free:
return err;
}
+static int alloc_key_value(struct bpf_map_info *info, void **key, void **value)
+{
+ *key = NULL;
+ *value = NULL;
+
+ if (info->key_size) {
+ *key = malloc(info->key_size);
+ if (!*key) {
+ p_err("key mem alloc failed");
+ return -1;
+ }
+ }
+
+ if (info->value_size) {
+ *value = alloc_value(info);
+ if (!*value) {
+ p_err("value mem alloc failed");
+ free(*key);
+ *key = NULL;
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
static int do_update(int argc, char **argv)
{
struct bpf_map_info info = {};
@@ -795,13 +835,9 @@ static int do_update(int argc, char **argv)
if (fd < 0)
return -1;
- key = malloc(info.key_size);
- value = alloc_value(&info);
- if (!key || !value) {
- p_err("mem alloc failed");
- err = -1;
+ err = alloc_key_value(&info, &key, &value);
+ if (err)
goto exit_free;
- }
err = parse_elem(argv, &info, key, value, info.key_size,
info.value_size, &flags, &value_fd);
@@ -826,12 +862,51 @@ exit_free:
return err;
}
+static void print_key_value(struct bpf_map_info *info, void *key,
+ void *value)
+{
+ json_writer_t *btf_wtr;
+ struct btf *btf = NULL;
+ int err;
+
+ err = btf__get_from_id(info->btf_id, &btf);
+ if (err) {
+ p_err("failed to get btf");
+ return;
+ }
+
+ if (json_output) {
+ print_entry_json(info, key, value, btf);
+ } else if (btf) {
+ /* if we are here, json_wtr has not been initialised,
+ * so create a separate writer for btf
+ */
+ btf_wtr = get_btf_writer();
+ if (!btf_wtr) {
+ p_info("failed to create json writer for btf. falling back to plain output");
+ btf__free(btf);
+ btf = NULL;
+ print_entry_plain(info, key, value);
+ } else {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = btf_wtr,
+ .is_plain_text = true,
+ };
+
+ do_dump_btf(&d, info, key, value);
+ jsonw_destroy(&btf_wtr);
+ }
+ } else {
+ print_entry_plain(info, key, value);
+ }
+ btf__free(btf);
+}
+
static int do_lookup(int argc, char **argv)
{
struct bpf_map_info info = {};
__u32 len = sizeof(info);
- json_writer_t *btf_wtr;
- struct btf *btf = NULL;
void *key, *value;
int err;
int fd;
@@ -843,13 +918,9 @@ static int do_lookup(int argc, char **argv)
if (fd < 0)
return -1;
- key = malloc(info.key_size);
- value = alloc_value(&info);
- if (!key || !value) {
- p_err("mem alloc failed");
- err = -1;
+ err = alloc_key_value(&info, &key, &value);
+ if (err)
goto exit_free;
- }
err = parse_elem(argv, &info, key, NULL, info.key_size, 0, NULL, NULL);
if (err)
@@ -873,43 +944,12 @@ static int do_lookup(int argc, char **argv)
}
/* here means bpf_map_lookup_elem() succeeded */
- err = btf__get_from_id(info.btf_id, &btf);
- if (err) {
- p_err("failed to get btf");
- goto exit_free;
- }
-
- if (json_output) {
- print_entry_json(&info, key, value, btf);
- } else if (btf) {
- /* if here json_wtr wouldn't have been initialised,
- * so let's create separate writer for btf
- */
- btf_wtr = get_btf_writer();
- if (!btf_wtr) {
- p_info("failed to create json writer for btf. falling back to plain output");
- btf__free(btf);
- btf = NULL;
- print_entry_plain(&info, key, value);
- } else {
- struct btf_dumper d = {
- .btf = btf,
- .jw = btf_wtr,
- .is_plain_text = true,
- };
-
- do_dump_btf(&d, &info, key, value);
- jsonw_destroy(&btf_wtr);
- }
- } else {
- print_entry_plain(&info, key, value);
- }
+ print_key_value(&info, key, value);
exit_free:
free(key);
free(value);
close(fd);
- btf__free(btf);
return err;
}
@@ -1122,6 +1162,49 @@ static int do_create(int argc, char **argv)
return 0;
}
+static int do_pop_dequeue(int argc, char **argv)
+{
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ void *key, *value;
+ int err;
+ int fd;
+
+ if (argc < 2)
+ usage();
+
+ fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
+ if (fd < 0)
+ return -1;
+
+ err = alloc_key_value(&info, &key, &value);
+ if (err)
+ goto exit_free;
+
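+ /* Queue and stack maps have no key, so key is NULL here; the kernel
+ * returns the popped element in value and deletes it atomically
+ */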
+ err = bpf_map_lookup_and_delete_elem(fd, key, value);
+ if (err) {
+ if (errno == ENOENT) {
+ if (json_output)
+ jsonw_null(json_wtr);
+ else
+ printf("Error: empty map\n");
+ } else {
+ p_err("pop failed: %s", strerror(errno));
+ }
+
+ goto exit_free;
+ }
+
+ print_key_value(&info, key, value);
+
+exit_free:
+ free(key);
+ free(value);
+ close(fd);
+
+ return err;
+}
+
static int do_help(int argc, char **argv)
{
if (json_output) {
@@ -1135,12 +1218,17 @@ static int do_help(int argc, char **argv)
" entries MAX_ENTRIES name NAME [flags FLAGS] \\\n"
" [dev NAME]\n"
" %s %s dump MAP\n"
- " %s %s update MAP key DATA value VALUE [UPDATE_FLAGS]\n"
- " %s %s lookup MAP key DATA\n"
+ " %s %s update MAP [key DATA] [value VALUE] [UPDATE_FLAGS]\n"
+ " %s %s lookup MAP [key DATA]\n"
" %s %s getnext MAP [key DATA]\n"
" %s %s delete MAP key DATA\n"
" %s %s pin MAP FILE\n"
" %s %s event_pipe MAP [cpu N index M]\n"
+ " %s %s peek MAP\n"
+ " %s %s push MAP value VALUE\n"
+ " %s %s pop MAP\n"
+ " %s %s enqueue MAP value VALUE\n"
+ " %s %s dequeue MAP\n"
" %s %s help\n"
"\n"
" " HELP_SPEC_MAP "\n"
@@ -1158,7 +1246,8 @@ static int do_help(int argc, char **argv)
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2]);
+ bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
+ bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]);
return 0;
}
@@ -1175,6 +1264,11 @@ static const struct cmd cmds[] = {
{ "pin", do_pin },
{ "event_pipe", do_event_pipe },
{ "create", do_create },
+ { "peek", do_lookup },
+ { "push", do_update },
+ { "enqueue", do_update },
+ { "pop", do_pop_dequeue },
+ { "dequeue", do_pop_dequeue },
{ 0 }
};
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 2d1bb7d6ff51..0640e9bc0ada 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -930,10 +930,9 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
err = libbpf_prog_type_by_name(type, &attr.prog_type,
&expected_attach_type);
free(type);
- if (err < 0) {
- p_err("unknown program type '%s'", *argv);
+ if (err < 0)
goto err_free_reuse_maps;
- }
+
NEXT_ARG();
} else if (is_prefix(*argv, "map")) {
void *new_map_replace;
@@ -1028,11 +1027,8 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
err = libbpf_prog_type_by_name(sec_name, &prog_type,
&expected_attach_type);
- if (err < 0) {
- p_err("failed to guess program type based on section name %s\n",
- sec_name);
+ if (err < 0)
goto err_close_obj;
- }
}
bpf_program__set_ifindex(pos, ifindex);
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h
index af55acf73e75..cce0b02c0e28 100644
--- a/tools/include/linux/filter.h
+++ b/tools/include/linux/filter.h
@@ -199,6 +199,16 @@
.off = OFF, \
.imm = 0 })
+/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_REG(OP, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
@@ -209,6 +219,16 @@
.off = OFF, \
.imm = IMM })
+/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = OFF, \
+ .imm = IMM })
+
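+/* Example: BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 7, off) jumps when the low
+ * 32 bits of R1 equal 7, ignoring the upper half of the register.
+ */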
/* Unconditional jumps, goto pc + off16 */
#define BPF_JMP_A(OFF) \
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 91c43884f295..60b99b730a41 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -14,6 +14,7 @@
/* Extended instruction set based on top of classic BPF */
/* instruction classes */
+#define BPF_JMP32 0x06 /* jmp mode in word width */
#define BPF_ALU64 0x07 /* alu mode in double word width */
/* ld/ldx fields */
@@ -2540,6 +2541,7 @@ struct __sk_buff {
__bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
__u64 tstamp;
__u32 wire_len;
+ __u32 gso_segs;
};
struct bpf_tunnel_key {
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 197b40f5b5c6..bfd9bfc82c3b 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1 +1 @@
-libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o
+libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o libbpf_probes.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 34d9c3619c96..847916273696 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -14,21 +14,6 @@ srctree := $(patsubst %/,%,$(dir $(srctree)))
#$(info Determined 'srctree' to be $(srctree))
endif
-# Makefiles suck: This macro sets a default value of $(2) for the
-# variable named by $(1), unless the variable has been set by
-# environment or command line. This is necessary for CC and AR
-# because make sets default values, so the simpler ?= approach
-# won't work as expected.
-define allow-override
- $(if $(or $(findstring environment,$(origin $(1))),\
- $(findstring command line,$(origin $(1)))),,\
- $(eval $(1) = $(2)))
-endef
-
-# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix.
-$(call allow-override,CC,$(CROSS_COMPILE)gcc)
-$(call allow-override,AR,$(CROSS_COMPILE)ar)
-
INSTALL = install
# Use DESTDIR for installing into a different root directory.
@@ -54,7 +39,7 @@ man_dir_SQ = '$(subst ','\'',$(man_dir))'
export man_dir man_dir_SQ INSTALL
export DESTDIR DESTDIR_SQ
-include ../../scripts/Makefile.include
+include $(srctree)/tools/scripts/Makefile.include
# copy a bit from Linux kbuild
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 169e347c76f6..2ccde17957e6 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2667,9 +2667,38 @@ static const struct {
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT
+#define MAX_TYPE_NAME_SIZE 32
+
+static char *libbpf_get_type_names(bool attach_type)
+{
+ int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
+ char *buf;
+
+ buf = malloc(len);
+ if (!buf)
+ return NULL;
+
+ buf[0] = '\0';
+ /* Build the string buf with all available names */
+ for (i = 0; i < ARRAY_SIZE(section_names); i++) {
+ if (attach_type && !section_names[i].is_attachable)
+ continue;
+
+ if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
+ free(buf);
+ return NULL;
+ }
+ strcat(buf, " ");
+ strcat(buf, section_names[i].sec);
+ }
+
+ return buf;
+}
+
int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
enum bpf_attach_type *expected_attach_type)
{
+ char *type_names;
int i;
if (!name)
@@ -2682,12 +2711,20 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
*expected_attach_type = section_names[i].expected_attach_type;
return 0;
}
+ pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
+ type_names = libbpf_get_type_names(false);
+ if (type_names != NULL) {
+ pr_info("supported section(type) names are:%s\n", type_names);
+ free(type_names);
+ }
+
return -EINVAL;
}
int libbpf_attach_type_by_name(const char *name,
enum bpf_attach_type *attach_type)
{
+ char *type_names;
int i;
if (!name)
@@ -2701,6 +2738,13 @@ int libbpf_attach_type_by_name(const char *name,
*attach_type = section_names[i].attach_type;
return 0;
}
+ pr_warning("failed to guess attach type based on ELF section name '%s'\n", name);
+ type_names = libbpf_get_type_names(true);
+ if (type_names != NULL) {
+ pr_info("attachable section(type) names are:%s\n", type_names);
+ free(type_names);
+ }
+
return -EINVAL;
}
@@ -2907,8 +2951,6 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
err = bpf_program__identify_section(prog, &prog_type,
&expected_attach_type);
if (err < 0) {
- pr_warning("failed to guess program type based on section name %s\n",
- prog->section_name);
bpf_object__close(obj);
return -EINVAL;
}
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 5f68d7b75215..62ae6cb93da1 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -355,6 +355,20 @@ LIBBPF_API const struct bpf_line_info *
bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
__u32 insn_off, __u32 nr_skip);
+/*
+ * Probe for supported system features
+ *
+ * Note that running many of these probes in a short amount of time can cause
+ * the kernel to reach the maximal size of lockable memory allowed for the
+ * user, causing subsequent probes to fail. In this case, the caller may want
+ * to adjust that limit with setrlimit().
+ */
+LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type,
+ __u32 ifindex);
+LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex);
+LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id,
+ enum bpf_prog_type prog_type, __u32 ifindex);
+
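+/*
+ * Illustrative sketch (not part of the patch): a caller could skip loading
+ * an XDP object when the running kernel cannot handle the program type:
+ *
+ *	if (!bpf_probe_prog_type(BPF_PROG_TYPE_XDP, 0))
+ *		return -ENOTSUP;
+ */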
#ifdef __cplusplus
} /* extern "C" */
#endif
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index cd02cd4e2cc3..266bc95d0142 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -124,3 +124,10 @@ LIBBPF_0.0.1 {
local:
*;
};
+
+LIBBPF_0.0.2 {
+ global:
+ bpf_probe_helper;
+ bpf_probe_map_type;
+ bpf_probe_prog_type;
+} LIBBPF_0.0.1;
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
new file mode 100644
index 000000000000..8c3a1c04dcb2
--- /dev/null
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2019 Netronome Systems, Inc. */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <net/if.h>
+#include <sys/utsname.h>
+
+#include <linux/filter.h>
+#include <linux/kernel.h>
+
+#include "bpf.h"
+#include "libbpf.h"
+
+static bool grep(const char *buffer, const char *pattern)
+{
+ return !!strstr(buffer, pattern);
+}
+
+static int get_vendor_id(int ifindex)
+{
+ char ifname[IF_NAMESIZE], path[64], buf[8];
+ ssize_t len;
+ int fd;
+
+ if (!if_indextoname(ifindex, ifname))
+ return -1;
+
+ snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ len = read(fd, buf, sizeof(buf));
+ close(fd);
+ if (len < 0)
+ return -1;
+ if (len >= (ssize_t)sizeof(buf))
+ return -1;
+ buf[len] = '\0';
+
+ return strtol(buf, NULL, 0);
+}
+
+static int get_kernel_version(void)
+{
+ int version, subversion, patchlevel;
+ struct utsname utsn;
+
+ /* Return 0 on failure, and attempt to probe with empty kversion */
+ if (uname(&utsn))
+ return 0;
+
+ if (sscanf(utsn.release, "%d.%d.%d",
+ &version, &subversion, &patchlevel) != 3)
+ return 0;
+
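+ /* Same packing as the KERNEL_VERSION() macro from <linux/version.h> */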
+ return (version << 16) + (subversion << 8) + patchlevel;
+}
+
+static void
+probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
+ size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex)
+{
+ struct bpf_load_program_attr xattr = {};
+ int fd;
+
+ switch (prog_type) {
+ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+ xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
+ break;
+ case BPF_PROG_TYPE_KPROBE:
+ xattr.kern_version = get_kernel_version();
+ break;
+ case BPF_PROG_TYPE_UNSPEC:
+ case BPF_PROG_TYPE_SOCKET_FILTER:
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_SCHED_ACT:
+ case BPF_PROG_TYPE_TRACEPOINT:
+ case BPF_PROG_TYPE_XDP:
+ case BPF_PROG_TYPE_PERF_EVENT:
+ case BPF_PROG_TYPE_CGROUP_SKB:
+ case BPF_PROG_TYPE_CGROUP_SOCK:
+ case BPF_PROG_TYPE_LWT_IN:
+ case BPF_PROG_TYPE_LWT_OUT:
+ case BPF_PROG_TYPE_LWT_XMIT:
+ case BPF_PROG_TYPE_SOCK_OPS:
+ case BPF_PROG_TYPE_SK_SKB:
+ case BPF_PROG_TYPE_CGROUP_DEVICE:
+ case BPF_PROG_TYPE_SK_MSG:
+ case BPF_PROG_TYPE_RAW_TRACEPOINT:
+ case BPF_PROG_TYPE_LWT_SEG6LOCAL:
+ case BPF_PROG_TYPE_LIRC_MODE2:
+ case BPF_PROG_TYPE_SK_REUSEPORT:
+ case BPF_PROG_TYPE_FLOW_DISSECTOR:
+ default:
+ break;
+ }
+
+ xattr.prog_type = prog_type;
+ xattr.insns = insns;
+ xattr.insns_cnt = insns_cnt;
+ xattr.license = "GPL";
+ xattr.prog_ifindex = ifindex;
+
+ fd = bpf_load_program_xattr(&xattr, buf, buf_len);
+ if (fd >= 0)
+ close(fd);
+}
+
+bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
+{
+ struct bpf_insn insns[2] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN()
+ };
+
+ if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
+ /* nfp returns -EINVAL on exit(0) with TC offload */
+ insns[0].imm = 2;
+
+ errno = 0;
+ probe_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);
+
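+ /* A successful load leaves errno at 0; EINVAL or EOPNOTSUPP means the
+ * kernel or the device rejected this program type, while any other
+ * error is assumed to be unrelated to the type being probed
+ */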
+ return errno != EINVAL && errno != EOPNOTSUPP;
+}
+
+bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
+{
+ int key_size, value_size, max_entries, map_flags;
+ struct bpf_create_map_attr attr = {};
+ int fd = -1, fd_inner;
+
+ key_size = sizeof(__u32);
+ value_size = sizeof(__u32);
+ max_entries = 1;
+ map_flags = 0;
+
+ switch (map_type) {
+ case BPF_MAP_TYPE_STACK_TRACE:
+ value_size = sizeof(__u64);
+ break;
+ case BPF_MAP_TYPE_LPM_TRIE:
+ key_size = sizeof(__u64);
+ value_size = sizeof(__u64);
+ map_flags = BPF_F_NO_PREALLOC;
+ break;
+ case BPF_MAP_TYPE_CGROUP_STORAGE:
+ case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
+ key_size = sizeof(struct bpf_cgroup_storage_key);
+ value_size = sizeof(__u64);
+ max_entries = 0;
+ break;
+ case BPF_MAP_TYPE_QUEUE:
+ case BPF_MAP_TYPE_STACK:
+ key_size = 0;
+ break;
+ case BPF_MAP_TYPE_UNSPEC:
+ case BPF_MAP_TYPE_HASH:
+ case BPF_MAP_TYPE_ARRAY:
+ case BPF_MAP_TYPE_PROG_ARRAY:
+ case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+ case BPF_MAP_TYPE_PERCPU_HASH:
+ case BPF_MAP_TYPE_PERCPU_ARRAY:
+ case BPF_MAP_TYPE_CGROUP_ARRAY:
+ case BPF_MAP_TYPE_LRU_HASH:
+ case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+ case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+ case BPF_MAP_TYPE_HASH_OF_MAPS:
+ case BPF_MAP_TYPE_DEVMAP:
+ case BPF_MAP_TYPE_SOCKMAP:
+ case BPF_MAP_TYPE_CPUMAP:
+ case BPF_MAP_TYPE_XSKMAP:
+ case BPF_MAP_TYPE_SOCKHASH:
+ case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
+ default:
+ break;
+ }
+
+ if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
+ map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
+ /* TODO: probe for device, once libbpf has a function to create
+ * map-in-map for offload
+ */
+ if (ifindex)
+ return false;
+
+ fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH,
+ sizeof(__u32), sizeof(__u32), 1, 0);
+ if (fd_inner < 0)
+ return false;
+ fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32),
+ fd_inner, 1, 0);
+ close(fd_inner);
+ } else {
+ /* Note: No other restriction on map type probes for offload */
+ attr.map_type = map_type;
+ attr.key_size = key_size;
+ attr.value_size = value_size;
+ attr.max_entries = max_entries;
+ attr.map_flags = map_flags;
+ attr.map_ifindex = ifindex;
+
+ fd = bpf_create_map_xattr(&attr);
+ }
+ if (fd >= 0)
+ close(fd);
+
+ return fd >= 0;
+}
+
+bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
+ __u32 ifindex)
+{
+ struct bpf_insn insns[2] = {
+ BPF_EMIT_CALL(id),
+ BPF_EXIT_INSN()
+ };
+ char buf[4096] = {};
+ bool res;
+
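+ /* Load a minimal program that just calls the helper under test and
+ * scan the verifier log: unknown helper IDs are reported there rather
+ * than through a distinctive errno
+ */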
+ probe_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf),
+ ifindex);
+ res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");
+
+ if (ifindex) {
+ switch (get_vendor_id(ifindex)) {
+ case 0x19ee: /* Netronome specific */
+ res = res && !grep(buf, "not supported by FW") &&
+ !grep(buf, "unsupported function id");
+ break;
+ default:
+ break;
+ }
+ }
+
+ return res;
+}
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 41ab7a3668b3..8993e9c8f410 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -10,15 +10,14 @@ ifneq ($(wildcard $(GENHDR)),)
GENFLAGS := -DHAVE_GENHDR
endif
+CLANG ?= clang
+LLC ?= llc
+LLVM_OBJCOPY ?= llvm-objcopy
+LLVM_READELF ?= llvm-readelf
+BTF_PAHOLE ?= pahole
CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(BPFDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
LDLIBS += -lcap -lelf -lrt -lpthread
-TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read
-all: $(TEST_CUSTOM_PROGS)
-
-$(TEST_CUSTOM_PROGS): $(OUTPUT)/%: %.c
- $(CC) -o $(TEST_CUSTOM_PROGS) -static $< -Wl,--build-id
-
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
@@ -26,21 +25,42 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
test_socket_cookie test_cgroup_storage test_select_reuseport test_section_names \
test_netcnt test_tcpnotify_user
-TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
- test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \
- sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o \
- test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o \
- test_tcpnotify_kern.o \
- sample_map_ret0.o test_tcpbpf_kern.o test_stacktrace_build_id.o \
- sockmap_tcp_msg_prog.o connect4_prog.o connect6_prog.o test_adjust_tail.o \
- test_btf_haskv.o test_btf_nokv.o test_sockmap_kern.o test_tunnel_kern.o \
- test_get_stack_rawtp.o test_sockmap_kern.o test_sockhash_kern.o \
- test_lwt_seg6local.o sendmsg4_prog.o sendmsg6_prog.o test_lirc_mode2_kern.o \
+BPF_OBJ_FILES = \
+ test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \
+ sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o \
+ test_tcpnotify_kern.o sample_map_ret0.o test_tcpbpf_kern.o \
+ sockmap_tcp_msg_prog.o connect4_prog.o connect6_prog.o \
+ test_btf_haskv.o test_btf_nokv.o test_sockmap_kern.o \
+ test_tunnel_kern.o test_sockhash_kern.o test_lwt_seg6local.o \
+ sendmsg4_prog.o sendmsg6_prog.o test_lirc_mode2_kern.o \
get_cgroup_id_kern.o socket_cookie_prog.o test_select_reuseport_kern.o \
- test_skb_cgroup_id_kern.o bpf_flow.o netcnt_prog.o \
- test_sk_lookup_kern.o test_xdp_vlan.o test_queue_map.o test_stack_map.o \
+ test_skb_cgroup_id_kern.o bpf_flow.o netcnt_prog.o test_xdp_vlan.o \
xdp_dummy.o test_map_in_map.o
+# Objects in this list are built twice: once with default compilation flags
+# and once with sub-register code-gen enabled.
+BPF_OBJ_FILES_DUAL_COMPILE = \
+ test_pkt_access.o test_xdp.o test_adjust_tail.o \
+ test_l4lb.o test_l4lb_noinline.o test_xdp_noinline.o test_tcp_estats.o \
+ test_obj_id.o test_pkt_md_access.o test_tracepoint.o \
+ test_stacktrace_map.o test_stacktrace_build_id.o \
+ test_get_stack_rawtp.o test_sk_lookup_kern.o \
+ test_queue_map.o test_stack_map.o
+
+TEST_GEN_FILES = $(BPF_OBJ_FILES) $(BPF_OBJ_FILES_DUAL_COMPILE)
+
+# Also test sub-register code-gen if LLVM and the kernel both have eBPF v3
+# processor support, which is the first version to contain both ALU32 and
+# JMP32 instructions.
+SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \
+ $(CLANG) -target bpf -O2 -emit-llvm -S -x c - -o - | \
+ $(LLC) -mattr=+alu32 -mcpu=probe 2>&1 | \
+ grep 'if w')
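+# The probe above compiles a trivial C function and greps the llc asm
+# output for "if w", i.e. a conditional jump on 32-bit sub-registers,
+# which only appears when JMP32 (eBPF v3) code-gen is available.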
+ifneq ($(SUBREG_CODEGEN),)
+TEST_GEN_FILES += $(patsubst %.o,alu32/%.o, $(BPF_OBJ_FILES_DUAL_COMPILE))
+endif
+
# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
test_libbpf.sh \
@@ -66,6 +86,13 @@ TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_use
include ../lib.mk
+# NOTE: $(OUTPUT) won't get default value if used before lib.mk
+TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read
+all: $(TEST_CUSTOM_PROGS)
+
+$(OUTPUT)/urandom_read: $(OUTPUT)/%: %.c
+ $(CC) -o $@ -static $< -Wl,--build-id
+
BPFOBJ := $(OUTPUT)/libbpf.a
$(TEST_GEN_PROGS): $(BPFOBJ)
@@ -93,11 +120,6 @@ force:
$(BPFOBJ): force
$(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
-CLANG ?= clang
-LLC ?= llc
-LLVM_OBJCOPY ?= llvm-objcopy
-BTF_PAHOLE ?= pahole
-
PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
# Let newer LLVM versions transparently probe the kernel for availability
@@ -127,12 +149,15 @@ $(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline
$(OUTPUT)/test_queue_map.o: test_queue_stack_map.h
$(OUTPUT)/test_stack_map.o: test_queue_stack_map.h
+$(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h
+$(OUTPUT)/test_progs.o: flow_dissector_load.h
+
BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
$(CLANG) -target bpf -O2 -g -c -x c - -o ./llvm_btf_verify.o; \
- readelf -S ./llvm_btf_verify.o | grep BTF; \
+ $(LLVM_READELF) -S ./llvm_btf_verify.o | grep BTF; \
/bin/rm -f ./llvm_btf_verify.o)
ifneq ($(BTF_LLVM_PROBE),)
@@ -149,6 +174,30 @@ endif
endif
endif
+ifneq ($(SUBREG_CODEGEN),)
+ALU32_BUILD_DIR = $(OUTPUT)/alu32
+TEST_CUSTOM_PROGS += $(ALU32_BUILD_DIR)/test_progs_32
+$(ALU32_BUILD_DIR):
+ mkdir -p $@
+
+$(ALU32_BUILD_DIR)/urandom_read: $(OUTPUT)/urandom_read
+ cp $< $@
+
+$(ALU32_BUILD_DIR)/test_progs_32: test_progs.c $(ALU32_BUILD_DIR) \
+ $(ALU32_BUILD_DIR)/urandom_read
+ $(CC) $(CFLAGS) -o $(ALU32_BUILD_DIR)/test_progs_32 $< \
+ trace_helpers.c $(OUTPUT)/libbpf.a $(LDLIBS)
+
+$(ALU32_BUILD_DIR)/%.o: %.c $(ALU32_BUILD_DIR) $(ALU32_BUILD_DIR)/test_progs_32
+ $(CLANG) $(CLANG_FLAGS) \
+ -O2 -target bpf -emit-llvm -c $< -o - | \
+ $(LLC) -march=bpf -mattr=+alu32 -mcpu=$(CPU) $(LLC_FLAGS) \
+ -filetype=obj -o $@
+ifeq ($(DWARF2BTF),y)
+ $(BTF_PAHOLE) -J $@
+endif
+endif
+
# Have one program compiled without "-target bpf" to test whether libbpf loads
# it successfully
$(OUTPUT)/test_xdp.o: test_xdp.c
@@ -167,4 +216,17 @@ ifeq ($(DWARF2BTF),y)
$(BTF_PAHOLE) -J $@
endif
-EXTRA_CLEAN := $(TEST_CUSTOM_PROGS)
+$(OUTPUT)/test_verifier: $(OUTPUT)/verifier/tests.h
+$(OUTPUT)/test_verifier: CFLAGS += -I$(OUTPUT)
+
+VERIFIER_TEST_FILES := $(wildcard verifier/*.c)
+$(OUTPUT)/verifier/tests.h: $(VERIFIER_TEST_FILES)
+ $(shell ( cd verifier/
+ echo '/* Generated header, do not edit */'; \
+ echo '#ifdef FILL_ARRAY'; \
+ ls *.c 2> /dev/null | \
+ sed -e 's@\(.*\)@#include \"\1\"@'; \
+ echo '#endif' \
+ ) > $(OUTPUT)/verifier/tests.h)
+
+EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(ALU32_BUILD_DIR)
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c
index ae8180b11d5f..77cafa66d048 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.c
+++ b/tools/testing/selftests/bpf/flow_dissector_load.c
@@ -12,6 +12,7 @@
#include <bpf/libbpf.h>
#include "bpf_rlimit.h"
+#include "flow_dissector_load.h"
const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector";
const char *cfg_map_name = "jmp_table";
@@ -21,46 +22,13 @@ char *cfg_path_name;
static void load_and_attach_program(void)
{
- struct bpf_program *prog, *main_prog;
- struct bpf_map *prog_array;
- int i, fd, prog_fd, ret;
+ int prog_fd, ret;
struct bpf_object *obj;
- int prog_array_fd;
- ret = bpf_prog_load(cfg_path_name, BPF_PROG_TYPE_FLOW_DISSECTOR, &obj,
- &prog_fd);
+ ret = bpf_flow_load(&obj, cfg_path_name, cfg_section_name,
+ cfg_map_name, &prog_fd);
if (ret)
- error(1, 0, "bpf_prog_load %s", cfg_path_name);
-
- main_prog = bpf_object__find_program_by_title(obj, cfg_section_name);
- if (!main_prog)
- error(1, 0, "bpf_object__find_program_by_title %s",
- cfg_section_name);
-
- prog_fd = bpf_program__fd(main_prog);
- if (prog_fd < 0)
- error(1, 0, "bpf_program__fd");
-
- prog_array = bpf_object__find_map_by_name(obj, cfg_map_name);
- if (!prog_array)
- error(1, 0, "bpf_object__find_map_by_name %s", cfg_map_name);
-
- prog_array_fd = bpf_map__fd(prog_array);
- if (prog_array_fd < 0)
- error(1, 0, "bpf_map__fd %s", cfg_map_name);
-
- i = 0;
- bpf_object__for_each_program(prog, obj) {
- fd = bpf_program__fd(prog);
- if (fd < 0)
- error(1, 0, "bpf_program__fd");
-
- if (fd != prog_fd) {
- printf("%d: %s\n", i, bpf_program__title(prog, false));
- bpf_map_update_elem(prog_array_fd, &i, &fd, BPF_ANY);
- ++i;
- }
- }
+ error(1, 0, "bpf_flow_load %s", cfg_path_name);
ret = bpf_prog_attach(prog_fd, 0 /* Ignore */, BPF_FLOW_DISSECTOR, 0);
if (ret)
@@ -69,7 +37,6 @@ static void load_and_attach_program(void)
ret = bpf_object__pin(obj, cfg_pin_path);
if (ret)
error(1, 0, "bpf_object__pin %s", cfg_pin_path);
-
}
static void detach_program(void)
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.h b/tools/testing/selftests/bpf/flow_dissector_load.h
new file mode 100644
index 000000000000..41dd6959feb0
--- /dev/null
+++ b/tools/testing/selftests/bpf/flow_dissector_load.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef FLOW_DISSECTOR_LOAD
+#define FLOW_DISSECTOR_LOAD
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+static inline int bpf_flow_load(struct bpf_object **obj,
+ const char *path,
+ const char *section_name,
+ const char *map_name,
+ int *prog_fd)
+{
+ struct bpf_program *prog, *main_prog;
+ struct bpf_map *prog_array;
+ int prog_array_fd;
+ int ret, fd, i;
+
+ ret = bpf_prog_load(path, BPF_PROG_TYPE_FLOW_DISSECTOR, obj,
+ prog_fd);
+ if (ret)
+ return ret;
+
+ main_prog = bpf_object__find_program_by_title(*obj, section_name);
+ if (!main_prog)
+ return -1;
+
+ *prog_fd = bpf_program__fd(main_prog);
+ if (*prog_fd < 0)
+ return -1;
+
+ prog_array = bpf_object__find_map_by_name(*obj, map_name);
+ if (!prog_array)
+ return -1;
+
+ prog_array_fd = bpf_map__fd(prog_array);
+ if (prog_array_fd < 0)
+ return -1;
+
+ i = 0;
+ bpf_object__for_each_program(prog, *obj) {
+ fd = bpf_program__fd(prog);
+ if (fd < 0)
+ return fd;
+
+ if (fd != *prog_fd) {
+ bpf_map_update_elem(prog_array_fd, &i, &fd, BPF_ANY);
+ ++i;
+ }
+ }
+
+ return 0;
+}
+
+#endif /* FLOW_DISSECTOR_LOAD */
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index a0bd04befe87..179f1d8ec5bf 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -18,6 +18,7 @@
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
+#include <assert.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
@@ -134,6 +135,12 @@ static struct btf_header hdr_tmpl = {
.hdr_len = sizeof(struct btf_header),
};
+/* several different mapv kinds (types) supported by pprint */
+enum pprint_mapv_kind_t {
+ PPRINT_MAPV_KIND_BASIC = 0,
+ PPRINT_MAPV_KIND_INT128,
+};
+
struct btf_raw_test {
const char *descr;
const char *str_sec;
@@ -156,6 +163,7 @@ struct btf_raw_test {
int type_off_delta;
int str_off_delta;
int str_len_delta;
+ enum pprint_mapv_kind_t mapv_kind;
};
#define BTF_STR_SEC(str) \
@@ -2707,6 +2715,99 @@ static struct btf_raw_test raw_tests[] = {
.err_str = "Invalid member offset",
},
+{
+ .descr = "128-bit int",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "int_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "struct, 128-bit int member",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "struct, 120-bit int member bitfield",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 120, 16), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "struct, kind_flag, 128-bit int member",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 16), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "struct, kind_flag, 120-bit int member bitfield",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 16), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(120, 0)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
}; /* struct btf_raw_test raw_tests[] */
static const char *get_next_str(const char *start, const char *end)
@@ -3530,6 +3631,16 @@ struct pprint_mapv {
uint32_t bits2c:2;
};
+#ifdef __SIZEOF_INT128__
+struct pprint_mapv_int128 {
+ __int128 si128a;
+ __int128 si128b;
+ unsigned __int128 bits3:3;
+ unsigned __int128 bits80:80;
+ unsigned __int128 ui128;
+};
+#endif
+
static struct btf_raw_test pprint_test_template[] = {
{
.raw_types = {
@@ -3721,6 +3832,35 @@ static struct btf_raw_test pprint_test_template[] = {
.max_entries = 128 * 1024,
},
+#ifdef __SIZEOF_INT128__
+{
+ /* test int128 */
+ .raw_types = {
+ /* unsigned int */ /* [1] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4),
+ /* __int128 */ /* [2] */
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 128, 16),
+ /* unsigned __int128 */ /* [3] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 128, 16),
+ /* struct pprint_mapv_int128 */ /* [4] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 5), 64),
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)), /* si128a */
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 128)), /* si128b */
+ BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(3, 256)), /* bits3 */
+ BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(80, 259)), /* bits80 */
+ BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(0, 384)), /* ui128 */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0unsigned int\0__int128\0unsigned __int128\0pprint_mapv_int128\0si128a\0si128b\0bits3\0bits80\0ui128"),
+ .key_size = sizeof(unsigned int),
+ .value_size = sizeof(struct pprint_mapv_int128),
+ .key_type_id = 1,
+ .value_type_id = 4,
+ .max_entries = 128 * 1024,
+ .mapv_kind = PPRINT_MAPV_KIND_INT128,
+},
+#endif
+
};
static struct btf_pprint_test_meta {
@@ -3787,24 +3927,108 @@ static struct btf_pprint_test_meta {
};
+static size_t get_pprint_mapv_size(enum pprint_mapv_kind_t mapv_kind)
+{
+ if (mapv_kind == PPRINT_MAPV_KIND_BASIC)
+ return sizeof(struct pprint_mapv);
+
+#ifdef __SIZEOF_INT128__
+ if (mapv_kind == PPRINT_MAPV_KIND_INT128)
+ return sizeof(struct pprint_mapv_int128);
+#endif
-static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i,
+ assert(0);
+}
+
+static void set_pprint_mapv(enum pprint_mapv_kind_t mapv_kind,
+ void *mapv, uint32_t i,
int num_cpus, int rounded_value_size)
{
int cpu;
- for (cpu = 0; cpu < num_cpus; cpu++) {
- v->ui32 = i + cpu;
- v->si32 = -i;
- v->unused_bits2a = 3;
- v->bits28 = i;
- v->unused_bits2b = 3;
- v->ui64 = i;
- v->aenum = i & 0x03;
- v->ui32b = 4;
- v->bits2c = 1;
- v = (void *)v + rounded_value_size;
+ if (mapv_kind == PPRINT_MAPV_KIND_BASIC) {
+ struct pprint_mapv *v = mapv;
+
+ for (cpu = 0; cpu < num_cpus; cpu++) {
+ v->ui32 = i + cpu;
+ v->si32 = -i;
+ v->unused_bits2a = 3;
+ v->bits28 = i;
+ v->unused_bits2b = 3;
+ v->ui64 = i;
+ v->aenum = i & 0x03;
+ v->ui32b = 4;
+ v->bits2c = 1;
+ v = (void *)v + rounded_value_size;
+ }
+ }
+
+#ifdef __SIZEOF_INT128__
+ if (mapv_kind == PPRINT_MAPV_KIND_INT128) {
+ struct pprint_mapv_int128 *v = mapv;
+
+ for (cpu = 0; cpu < num_cpus; cpu++) {
+ v->si128a = i;
+ v->si128b = -i;
+ v->bits3 = i & 0x07;
+ v->bits80 = (((unsigned __int128)1) << 64) + i;
+ v->ui128 = (((unsigned __int128)2) << 64) + i;
+ v = (void *)v + rounded_value_size;
+ }
}
+#endif
+}
+
+static ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind,
+ char *expected_line, ssize_t line_size,
+ bool percpu_map, unsigned int next_key,
+ int cpu, void *mapv)
+{
+ ssize_t nexpected_line = -1;
+
+ if (mapv_kind == PPRINT_MAPV_KIND_BASIC) {
+ struct pprint_mapv *v = mapv;
+
+ nexpected_line = snprintf(expected_line, line_size,
+ "%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
+ "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
+ "%u,0x%x}\n",
+ percpu_map ? "\tcpu" : "",
+ percpu_map ? cpu : next_key,
+ v->ui32, v->si32,
+ v->unused_bits2a,
+ v->bits28,
+ v->unused_bits2b,
+ v->ui64,
+ v->ui8a[0], v->ui8a[1],
+ v->ui8a[2], v->ui8a[3],
+ v->ui8a[4], v->ui8a[5],
+ v->ui8a[6], v->ui8a[7],
+ pprint_enum_str[v->aenum],
+ v->ui32b,
+ v->bits2c);
+ }
+
+#ifdef __SIZEOF_INT128__
+ if (mapv_kind == PPRINT_MAPV_KIND_INT128) {
+ struct pprint_mapv_int128 *v = mapv;
+
+ nexpected_line = snprintf(expected_line, line_size,
+ "%s%u: {0x%lx,0x%lx,0x%lx,"
+ "0x%lx%016lx,0x%lx%016lx}\n",
+ percpu_map ? "\tcpu" : "",
+ percpu_map ? cpu : next_key,
+ (uint64_t)v->si128a,
+ (uint64_t)v->si128b,
+ (uint64_t)v->bits3,
+ (uint64_t)(v->bits80 >> 64),
+ (uint64_t)v->bits80,
+ (uint64_t)(v->ui128 >> 64),
+ (uint64_t)v->ui128);
+ }
+#endif
+
+ return nexpected_line;
}
static int check_line(const char *expected_line, int nexpected_line,
@@ -3828,10 +4052,10 @@ static int check_line(const char *expected_line, int nexpected_line,
static int do_test_pprint(int test_num)
{
const struct btf_raw_test *test = &pprint_test_template[test_num];
+ enum pprint_mapv_kind_t mapv_kind = test->mapv_kind;
struct bpf_create_map_attr create_attr = {};
bool ordered_map, lossless_map, percpu_map;
int err, ret, num_cpus, rounded_value_size;
- struct pprint_mapv *mapv = NULL;
unsigned int key, nr_read_elems;
int map_fd = -1, btf_fd = -1;
unsigned int raw_btf_size;
@@ -3840,6 +4064,7 @@ static int do_test_pprint(int test_num)
char pin_path[255];
size_t line_len = 0;
char *line = NULL;
+ void *mapv = NULL;
uint8_t *raw_btf;
ssize_t nread;
@@ -3892,7 +4117,7 @@ static int do_test_pprint(int test_num)
percpu_map = test->percpu_map;
num_cpus = percpu_map ? bpf_num_possible_cpus() : 1;
- rounded_value_size = round_up(sizeof(struct pprint_mapv), 8);
+ rounded_value_size = round_up(get_pprint_mapv_size(mapv_kind), 8);
mapv = calloc(num_cpus, rounded_value_size);
if (CHECK(!mapv, "mapv allocation failure")) {
err = -1;
@@ -3900,7 +4125,7 @@ static int do_test_pprint(int test_num)
}
for (key = 0; key < test->max_entries; key++) {
- set_pprint_mapv(mapv, key, num_cpus, rounded_value_size);
+ set_pprint_mapv(mapv_kind, mapv, key, num_cpus, rounded_value_size);
bpf_map_update_elem(map_fd, &key, mapv, 0);
}
@@ -3924,13 +4149,13 @@ static int do_test_pprint(int test_num)
ordered_map = test->ordered_map;
lossless_map = test->lossless_map;
do {
- struct pprint_mapv *cmapv;
ssize_t nexpected_line;
unsigned int next_key;
+ void *cmapv;
int cpu;
next_key = ordered_map ? nr_read_elems : atoi(line);
- set_pprint_mapv(mapv, next_key, num_cpus, rounded_value_size);
+ set_pprint_mapv(mapv_kind, mapv, next_key, num_cpus, rounded_value_size);
cmapv = mapv;
for (cpu = 0; cpu < num_cpus; cpu++) {
@@ -3963,31 +4188,16 @@ static int do_test_pprint(int test_num)
break;
}
- nexpected_line = snprintf(expected_line, sizeof(expected_line),
- "%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
- "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
- "%u,0x%x}\n",
- percpu_map ? "\tcpu" : "",
- percpu_map ? cpu : next_key,
- cmapv->ui32, cmapv->si32,
- cmapv->unused_bits2a,
- cmapv->bits28,
- cmapv->unused_bits2b,
- cmapv->ui64,
- cmapv->ui8a[0], cmapv->ui8a[1],
- cmapv->ui8a[2], cmapv->ui8a[3],
- cmapv->ui8a[4], cmapv->ui8a[5],
- cmapv->ui8a[6], cmapv->ui8a[7],
- pprint_enum_str[cmapv->aenum],
- cmapv->ui32b,
- cmapv->bits2c);
-
+ nexpected_line = get_pprint_expected_line(mapv_kind, expected_line,
+ sizeof(expected_line),
+ percpu_map, next_key,
+ cpu, cmapv);
err = check_line(expected_line, nexpected_line,
sizeof(expected_line), line);
if (err == -1)
goto done;
- cmapv = (void *)cmapv + rounded_value_size;
+ cmapv = cmapv + rounded_value_size;
}
if (percpu_map) {
@@ -4083,6 +4293,10 @@ static struct prog_info_raw_test {
__u32 line_info_rec_size;
__u32 nr_jited_ksyms;
bool expected_prog_load_failure;
+ __u32 dead_code_cnt;
+ __u32 dead_code_mask;
+ __u32 dead_func_cnt;
+ __u32 dead_func_mask;
} info_raw_tests[] = {
{
.descr = "func_type (main func + one sub)",
@@ -4509,6 +4723,369 @@ static struct prog_info_raw_test {
.expected_prog_load_failure = true,
},
+{
+ .descr = "line_info (dead start)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0/* dead jmp */\0int a=1;\0int b=2;\0return a + b;\0return a + b;"),
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 0,
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 5, 6),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 1,
+ .dead_code_cnt = 1,
+ .dead_code_mask = 0x01,
+},
+
+{
+ .descr = "line_info (dead end)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0/* dead jmp */\0return a + b;\0/* dead exit */"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 0,
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 12),
+ BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 11),
+ BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 9),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 5, 8),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 6, 7),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 1,
+ .dead_code_cnt = 2,
+ .dead_code_mask = 0x28,
+},
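In these vectors, dead_code_mask is a bitmap over the original line_info entries: bit i set means the verifier's dead-code elimination is expected to drop entry i, and dead_code_cnt is the number of bits set. For the 0x28 mask above (bits 3 and 5: the dead jmp and the dead exit), a quick standalone check:

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x28;	/* line_info entries 3 and 5 dead */
	unsigned int i, live = 0;

	for (i = 0; i < 6; i++)		/* 6 original line_info entries */
		if (!(mask & (1u << i)))
			live++;
	printf("surviving entries: %u\n", live);	/* prints 4 */
	return 0;
}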
+
+{
+ .descr = "line_info (dead code + subprog + func_info)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0sub\0main\0int a=1+1;\0/* dead jmp */"
+ "\0/* dead */\0/* dead */\0/* dead */\0/* dead */"
+ "\0/* dead */\0/* dead */\0/* dead */\0/* dead */"
+ "\0return func(a);\0b+=1;\0return b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 8),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 2,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {14, 3} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(14, 0, NAME_TBD, 3, 8),
+ BPF_LINE_INFO_ENC(16, 0, NAME_TBD, 4, 7),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+ .dead_code_cnt = 9,
+ .dead_code_mask = 0x3fe,
+},
+
+{
+ .descr = "line_info (dead subprog)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* live call */"
+ "\0return 0;\0return 0;\0/* dead */\0/* dead */"
+ "\0/* dead */\0return bla + 1;\0return bla + 1;"
+ "\0return bla + 1;\0return func(a);\0b+=1;\0return b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_CALL_REL(3),
+ BPF_CALL_REL(5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 3,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {6, 3}, {9, 5} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+ .dead_code_cnt = 3,
+ .dead_code_mask = 0x70,
+ .dead_func_cnt = 1,
+ .dead_func_mask = 0x2,
+},
+
+{
+ .descr = "line_info (dead last subprog)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0dead\0main\0int a=1+1;\0/* live call */"
+ "\0return 0;\0/* dead */\0/* dead */"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_CALL_REL(2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 2,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {5, 3} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 1,
+ .dead_code_cnt = 2,
+ .dead_code_mask = 0x18,
+ .dead_func_cnt = 1,
+ .dead_func_mask = 0x2,
+},
+
+{
+ .descr = "line_info (dead subprog + dead start)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* dead */"
+ "\0return 0;\0return 0;\0return 0;"
+ "\0/* dead */\0/* dead */\0/* dead */\0/* dead */"
+ "\0return b + 1;\0return b + 1;\0return b + 1;"),
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_CALL_REL(3),
+ BPF_CALL_REL(5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 3,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {7, 3}, {10, 5} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(13, 0, NAME_TBD, 2, 9),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+ .dead_code_cnt = 5,
+ .dead_code_mask = 0x1e2,
+ .dead_func_cnt = 1,
+ .dead_func_mask = 0x2,
+},
+
+{
+ .descr = "line_info (dead subprog + dead start w/ move)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* live call */"
+ "\0return 0;\0return 0;\0/* dead */\0/* dead */"
+ "\0/* dead */\0return bla + 1;\0return bla + 1;"
+ "\0return bla + 1;\0return func(a);\0b+=1;\0return b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_CALL_REL(3),
+ BPF_CALL_REL(5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 3,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {6, 3}, {9, 5} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+ .dead_code_cnt = 3,
+ .dead_code_mask = 0x70,
+ .dead_func_cnt = 1,
+ .dead_func_mask = 0x2,
+},
+
+{
+ .descr = "line_info (dead end + subprog start w/ no linfo)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0main\0func\0/* main linfo */\0/* func linfo */"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 1, 3),
+ BPF_CALL_REL(3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 2,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 3}, {6, 4}, },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+},
+
};
static size_t probe_prog_length(const struct bpf_insn *fp)
@@ -4568,6 +5145,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
struct bpf_func_info *finfo;
__u32 info_len, rec_size, i;
void *func_info = NULL;
+ __u32 nr_func_info;
int err;
/* get necessary lens */
@@ -4577,7 +5155,8 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
fprintf(stderr, "%s\n", btf_log_buf);
return -1;
}
- if (CHECK(info.nr_func_info != test->func_info_cnt,
+ nr_func_info = test->func_info_cnt - test->dead_func_cnt;
+ if (CHECK(info.nr_func_info != nr_func_info,
"incorrect info.nr_func_info (1st) %d",
info.nr_func_info)) {
return -1;
@@ -4598,7 +5177,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
/* reset info to only retrieve func_info related data */
memset(&info, 0, sizeof(info));
- info.nr_func_info = test->func_info_cnt;
+ info.nr_func_info = nr_func_info;
info.func_info_rec_size = rec_size;
info.func_info = ptr_to_u64(func_info);
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
@@ -4607,7 +5186,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
err = -1;
goto done;
}
- if (CHECK(info.nr_func_info != test->func_info_cnt,
+ if (CHECK(info.nr_func_info != nr_func_info,
"incorrect info.nr_func_info (2nd) %d",
info.nr_func_info)) {
err = -1;
@@ -4621,7 +5200,9 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
}
finfo = func_info;
- for (i = 0; i < test->func_info_cnt; i++) {
+ for (i = 0; i < nr_func_info; i++) {
+ if (test->dead_func_mask & (1 << i))
+ continue;
if (CHECK(finfo->type_id != test->func_info[i][1],
"incorrect func_type %u expected %u",
finfo->type_id, test->func_info[i][1])) {
@@ -4650,6 +5231,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
struct bpf_prog_info info = {};
__u32 *jited_func_lens = NULL;
__u64 cur_func_ksyms;
+ __u32 dead_insns;
int err;
jited_cnt = cnt;
@@ -4658,7 +5240,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
if (test->nr_jited_ksyms)
nr_jited_ksyms = test->nr_jited_ksyms;
else
- nr_jited_ksyms = test->func_info_cnt;
+ nr_jited_ksyms = test->func_info_cnt - test->dead_func_cnt;
nr_jited_func_lens = nr_jited_ksyms;
info_len = sizeof(struct bpf_prog_info);
@@ -4760,12 +5342,20 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
goto done;
}
+ dead_insns = 0;
+ while (test->dead_code_mask & (1 << dead_insns))
+ dead_insns++;
+
CHECK(linfo[0].insn_off, "linfo[0].insn_off:%u",
linfo[0].insn_off);
for (i = 1; i < cnt; i++) {
const struct bpf_line_info *expected_linfo;
- expected_linfo = patched_linfo + (i * test->line_info_rec_size);
+ while (test->dead_code_mask & (1 << (i + dead_insns)))
+ dead_insns++;
+
+ expected_linfo = patched_linfo +
+ ((i + dead_insns) * test->line_info_rec_size);
if (CHECK(linfo[i].insn_off <= linfo[i - 1].insn_off,
"linfo[%u].insn_off:%u <= linfo[%u].insn_off:%u",
i, linfo[i].insn_off,
@@ -4923,7 +5513,9 @@ static int do_test_info_raw(unsigned int test_num)
if (err)
goto done;
- err = test_get_linfo(test, patched_linfo, attr.line_info_cnt, prog_fd);
+ err = test_get_linfo(test, patched_linfo,
+ attr.line_info_cnt - test->dead_code_cnt,
+ prog_fd);
if (err)
goto done;
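The index-translation loops added to test_get_linfo() above map the i-th surviving line_info entry returned by the kernel back to its slot in the original array by skipping mask bits as they accumulate. A standalone sketch of that walk, reusing the 0x28 mask from the "dead end" vector:

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x28;	/* original entries 3 and 5 dead */
	unsigned int cnt = 4;		/* 6 entries - 2 dead */
	unsigned int i, dead = 0;

	for (i = 0; i < cnt; i++) {
		while (mask & (1u << (i + dead)))
			dead++;
		printf("returned[%u] == original[%u]\n", i, i + dead);
	}
	return 0;	/* maps 0,1,2,3 onto 0,1,2,4 */
}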
diff --git a/tools/testing/selftests/bpf/test_flow_dissector.c b/tools/testing/selftests/bpf/test_flow_dissector.c
index 12b784afba31..01f0c634d548 100644
--- a/tools/testing/selftests/bpf/test_flow_dissector.c
+++ b/tools/testing/selftests/bpf/test_flow_dissector.c
@@ -16,7 +16,6 @@
#include <errno.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
-#include <linux/if_packet.h>
#include <linux/ipv6.h>
#include <netinet/ip.h>
#include <netinet/in.h>
@@ -25,7 +24,6 @@
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
-#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index e2b9eee37187..1dfef77cff6f 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -633,7 +633,6 @@ static void test_stackmap(int task, void *data)
close(fd);
}
-#include <sys/socket.h>
#include <sys/ioctl.h>
#include <arpa/inet.h>
#include <sys/select.h>
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 25f0083a9b2e..d8940b8b2f8d 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -39,6 +39,7 @@ typedef __u16 __sum16;
#include "bpf_endian.h"
#include "bpf_rlimit.h"
#include "trace_helpers.h"
+#include "flow_dissector_load.h"
static int error_cnt, pass_cnt;
static bool jit_enabled;
@@ -53,9 +54,10 @@ static struct {
} __packed pkt_v4 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
.iph.ihl = 5,
- .iph.protocol = 6,
+ .iph.protocol = IPPROTO_TCP,
.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.urg_ptr = 123,
+ .tcp.doff = 5,
};
/* ipv6 test vector */
@@ -65,9 +67,10 @@ static struct {
struct tcphdr tcp;
} __packed pkt_v6 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
- .iph.nexthdr = 6,
+ .iph.nexthdr = IPPROTO_TCP,
.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.urg_ptr = 123,
+ .tcp.doff = 5,
};
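Setting tcp.doff in both test vectors matters because the flow dissector derives the TCP header length from it: doff counts 32-bit words, so 5 describes a minimal 20-byte header, while the previous value of 0 looked like a malformed packet. A standalone illustration:

#include <stdio.h>
#include <netinet/tcp.h>

int main(void)
{
	struct tcphdr tcp = {0};

	tcp.doff = 5;
	/* data offset is in 32-bit words: 5 * 4 = 20 bytes, the minimum
	 * legal TCP header */
	printf("tcp header length: %d bytes\n", tcp.doff * 4);
	return 0;
}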
#define _CHECK(condition, tag, duration, format...) ({ \
@@ -1912,6 +1915,76 @@ out:
bpf_object__close(obj);
}
+#define CHECK_FLOW_KEYS(desc, got, expected) \
+ CHECK(memcmp(&got, &expected, sizeof(got)) != 0, \
+ desc, \
+ "nhoff=%u/%u " \
+ "thoff=%u/%u " \
+ "addr_proto=0x%x/0x%x " \
+ "is_frag=%u/%u " \
+ "is_first_frag=%u/%u " \
+ "is_encap=%u/%u " \
+ "n_proto=0x%x/0x%x " \
+ "sport=%u/%u " \
+ "dport=%u/%u\n", \
+ got.nhoff, expected.nhoff, \
+ got.thoff, expected.thoff, \
+ got.addr_proto, expected.addr_proto, \
+ got.is_frag, expected.is_frag, \
+ got.is_first_frag, expected.is_first_frag, \
+ got.is_encap, expected.is_encap, \
+ got.n_proto, expected.n_proto, \
+ got.sport, expected.sport, \
+ got.dport, expected.dport)
+
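CHECK_FLOW_KEYS compares the two structs with a single memcmp, which is only stable when padding bytes agree on both sides; the static expectations below are fully zero-filled, and the comparison assumes the kernel likewise zeroes the returned flow keys before dissection. A minimal stand-in showing why the explicit zeroing matters:

#include <stdio.h>
#include <string.h>

struct keys {			/* stand-in, not the real bpf_flow_keys */
	unsigned short nhoff, thoff;
	unsigned char ip_proto;	/* 3 padding bytes follow */
	unsigned int n_proto;
};

int main(void)
{
	struct keys a, b;

	/* zero everything, padding included, before setting fields */
	memset(&a, 0, sizeof(a));
	memset(&b, 0, sizeof(b));
	a.thoff = b.thoff = 20;
	a.n_proto = b.n_proto = 0x0800;
	printf("equal: %d\n", !memcmp(&a, &b, sizeof(a)));	/* 1 */
	return 0;
}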
+static struct bpf_flow_keys pkt_v4_flow_keys = {
+ .nhoff = 0,
+ .thoff = sizeof(struct iphdr),
+ .addr_proto = ETH_P_IP,
+ .ip_proto = IPPROTO_TCP,
+ .n_proto = bpf_htons(ETH_P_IP),
+};
+
+static struct bpf_flow_keys pkt_v6_flow_keys = {
+ .nhoff = 0,
+ .thoff = sizeof(struct ipv6hdr),
+ .addr_proto = ETH_P_IPV6,
+ .ip_proto = IPPROTO_TCP,
+ .n_proto = bpf_htons(ETH_P_IPV6),
+};
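The expected thoff values fall straight out of the fixed header sizes: with no IPv4 options or IPv6 extension headers, the transport header begins immediately after the network header. A standalone sketch printing the same constants:

#include <stdio.h>
#include <netinet/ip.h>
#include <linux/ipv6.h>

int main(void)
{
	/* the test packets carry no options or extension headers, so
	 * thoff is just the fixed network-header size */
	printf("ipv4 thoff: %zu\n", sizeof(struct iphdr));	/* 20 */
	printf("ipv6 thoff: %zu\n", sizeof(struct ipv6hdr));	/* 40 */
	return 0;
}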
+
+static void test_flow_dissector(void)
+{
+ struct bpf_flow_keys flow_keys;
+ struct bpf_object *obj;
+ __u32 duration, retval;
+ int err, prog_fd;
+ __u32 size;
+
+ err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
+ "jmp_table", &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
+ &flow_keys, &size, &retval, &duration);
+ CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv4",
+ "err %d errno %d retval %d duration %d size %u/%lu\n",
+ err, errno, retval, duration, size, sizeof(flow_keys));
+ CHECK_FLOW_KEYS("ipv4_flow_keys", flow_keys, pkt_v4_flow_keys);
+
+ err = bpf_prog_test_run(prog_fd, 10, &pkt_v6, sizeof(pkt_v6),
+ &flow_keys, &size, &retval, &duration);
+ CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv6",
+ "err %d errno %d retval %d duration %d size %u/%lu\n",
+ err, errno, retval, duration, size, sizeof(flow_keys));
+ CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys);
+
+ bpf_object__close(obj);
+}
+
int main(void)
{
srand(time(NULL));
@@ -1939,6 +2012,7 @@ int main(void)
test_reference_tracking();
test_queue_stack_map(QUEUE);
test_queue_stack_map(STACK);
+ test_flow_dissector();
printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c
index fc7832ee566b..e51d63786ff8 100644
--- a/tools/testing/selftests/bpf/test_socket_cookie.c
+++ b/tools/testing/selftests/bpf/test_socket_cookie.c
@@ -158,10 +158,8 @@ static int run_test(int cgfd)
bpf_object__for_each_program(prog, pobj) {
prog_name = bpf_program__title(prog, /*needs_copy*/ false);
- if (libbpf_attach_type_by_name(prog_name, &attach_type)) {
- log_err("Unexpected prog: %s", prog_name);
+ if (libbpf_attach_type_by_name(prog_name, &attach_type))
goto err;
- }
err = bpf_prog_attach(bpf_program__fd(prog), cgfd, attach_type,
BPF_F_ALLOW_OVERRIDE);
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index e85a771f607b..3845144e2c91 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -10,7 +10,6 @@
#include <unistd.h>
#include <string.h>
#include <errno.h>
-#include <sys/ioctl.h>
#include <stdbool.h>
#include <signal.h>
#include <fcntl.h>
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c
index 4e4353711a86..86152d9ae95b 100644
--- a/tools/testing/selftests/bpf/test_tcpnotify_user.c
+++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c
@@ -148,17 +148,17 @@ int main(int argc, char **argv)
pthread_create(&tid, NULL, poller_thread, (void *)&pmu_fd);
sprintf(test_script,
- "/usr/sbin/iptables -A INPUT -p tcp --dport %d -j DROP",
+ "iptables -A INPUT -p tcp --dport %d -j DROP",
TESTPORT);
system(test_script);
sprintf(test_script,
- "/usr/bin/nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 ",
+ "nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 ",
TESTPORT);
system(test_script);
sprintf(test_script,
- "/usr/sbin/iptables -D INPUT -p tcp --dport %d -j DROP",
+ "iptables -D INPUT -p tcp --dport %d -j DROP",
TESTPORT);
system(test_script);
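Dropping the absolute /usr/sbin and /usr/bin prefixes lets the test find iptables and nc wherever the distribution installs them, at the cost of trusting $PATH. A bounded variant of the command construction (a sketch; the port value is a stand-in for TESTPORT, and the test itself uses sprintf into a fixed array):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char cmd[128];
	int port = 12877;	/* stand-in for TESTPORT */

	/* snprintf bounds the write into the fixed-size buffer */
	snprintf(cmd, sizeof(cmd),
		 "iptables -A INPUT -p tcp --dport %d -j DROP", port);
	return system(cmd) ? 1 : 0;
}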
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 2fd90d456892..c5e22422a852 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -211,15394 +211,46 @@ static void bpf_fill_rand_ld_dw(struct bpf_test *self)
BPF_MOV64_IMM(BPF_REG_5, 0), \
BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
-static struct bpf_test tests[] = {
- {
- "add+sub+mul",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_2, 3),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = -3,
- },
- {
- "DIV32 by 0, zero check 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "DIV32 by 0, zero check 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "DIV64 by 0, zero check",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "MOD32 by 0, zero check 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "MOD32 by 0, zero check 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "MOD64 by 0, zero check",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "DIV32 by 0, zero check ok, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 2),
- BPF_MOV32_IMM(BPF_REG_2, 16),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 8,
- },
- {
- "DIV32 by 0, zero check 1, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV32 by 0, zero check 2, cls",
- .insns = {
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV64 by 0, zero check, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "MOD32 by 0, zero check ok, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 3),
- BPF_MOV32_IMM(BPF_REG_2, 5),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "MOD32 by 0, zero check 1, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "MOD32 by 0, zero check 2, cls",
- .insns = {
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "MOD64 by 0, zero check 1, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, 2),
- BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "MOD64 by 0, zero check 2, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, -1),
- BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = -1,
- },
- /* Just make sure that JITs used udiv/umod as otherwise we get
- * an exception from INT_MIN/-1 overflow similarly as with div
- * by zero.
- */
- {
- "DIV32 overflow, check 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV32 overflow, check 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
- BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV64 overflow, check 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
- BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV64 overflow, check 2",
- .insns = {
- BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
- BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "MOD32 overflow, check 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = INT_MIN,
- },
- {
- "MOD32 overflow, check 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
- BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = INT_MIN,
- },
- {
- "MOD64 overflow, check 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "MOD64 overflow, check 2",
- .insns = {
- BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "xor32 zero extend check",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_2, -1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
- BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
- BPF_MOV32_IMM(BPF_REG_0, 2),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "empty prog",
- .insns = {
- },
- .errstr = "unknown opcode 00",
- .result = REJECT,
- },
- {
- "only exit insn",
- .insns = {
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 !read_ok",
- .result = REJECT,
- },
- {
- "unreachable",
- .insns = {
- BPF_EXIT_INSN(),
- BPF_EXIT_INSN(),
- },
- .errstr = "unreachable",
- .result = REJECT,
- },
- {
- "unreachable2",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "unreachable",
- .result = REJECT,
- },
- {
- "out of range jump",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "out of range jump2",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, -2),
- BPF_EXIT_INSN(),
- },
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "test1 ld_imm64",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid BPF_LD_IMM insn",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "test2 ld_imm64",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid BPF_LD_IMM insn",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "test3 ld_imm64",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test4 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test5 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test6 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
- BPF_RAW_INSN(0, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "test7 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
- BPF_RAW_INSN(0, 0, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "test8 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
- BPF_RAW_INSN(0, 0, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "uses reserved fields",
- .result = REJECT,
- },
- {
- "test9 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
- BPF_RAW_INSN(0, 0, 0, 1, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test10 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
- BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test11 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
- BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test12 ld_imm64",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
- BPF_RAW_INSN(0, 0, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "not pointing to valid bpf_map",
- .result = REJECT,
- },
- {
- "test13 ld_imm64",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
- BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "arsh32 on imm",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "arsh32 on imm 2",
- .insns = {
- BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788),
- BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = -16069393,
- },
- {
- "arsh32 on reg",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_IMM(BPF_REG_1, 5),
- BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "arsh32 on reg 2",
- .insns = {
- BPF_LD_IMM64(BPF_REG_0, 0xffff55667788),
- BPF_MOV64_IMM(BPF_REG_1, 15),
- BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 43724,
- },
- {
- "arsh64 on imm",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "arsh64 on reg",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_IMM(BPF_REG_1, 5),
- BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "no bpf_exit",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
- },
- .errstr = "not an exit",
- .result = REJECT,
- },
- {
- "loop (back-edge)",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "loop2 (back-edge)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -4),
- BPF_EXIT_INSN(),
- },
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "conditional loop",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
- BPF_EXIT_INSN(),
- },
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "read uninitialized register",
- .insns = {
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 !read_ok",
- .result = REJECT,
- },
- {
- "read invalid register",
- .insns = {
- BPF_MOV64_REG(BPF_REG_0, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R15 is invalid",
- .result = REJECT,
- },
- {
- "program doesn't init R0 before exit",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 !read_ok",
- .result = REJECT,
- },
- {
- "program doesn't init R0 before exit in all branches",
- .insns = {
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 !read_ok",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "stack out of bounds",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack",
- .result = REJECT,
- },
- {
- "invalid call insn1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode 8d",
- .result = REJECT,
- },
- {
- "invalid call insn2",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_CALL uses reserved",
- .result = REJECT,
- },
- {
- "invalid function call",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid func unknown#1234567",
- .result = REJECT,
- },
- {
- "uninitialized stack1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 2 },
- .errstr = "invalid indirect read from stack",
- .result = REJECT,
- },
- {
- "uninitialized stack2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid read from stack",
- .result = REJECT,
- },
- {
- "invalid fp arithmetic",
- /* If this gets ever changed, make sure JITs can deal with it. */
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 subtraction from stack pointer",
- .result = REJECT,
- },
- {
- "non-invalid fp arithmetic",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "invalid argument register",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_cgroup_classid),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_cgroup_classid),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 !read_ok",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "non-invalid argument register",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_cgroup_classid),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_cgroup_classid),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "check valid spill/fill",
- .insns = {
- /* spill R1(ctx) into stack */
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- /* fill it back into R2 */
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
- /* should be able to access R0 = *(R2 + 8) */
- /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .retval = POINTER_VALUE,
- },
- {
- "check valid spill/fill, skb mark",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = ACCEPT,
- },
- {
- "check corrupted spill/fill",
- .insns = {
- /* spill R1(ctx) into stack */
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- /* mess up with R1 pointer on stack */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
- /* fill back into R0 is fine for priv.
- * R0 now becomes SCALAR_VALUE.
- */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- /* Load from R0 should fail. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .errstr = "R0 invalid mem access 'inv",
- .result = REJECT,
- },
- {
- "check corrupted spill/fill, LSB",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- },
- {
- "check corrupted spill/fill, MSB",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- },
- {
- "invalid src register in STX",
- .insns = {
- BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R15 is invalid",
- .result = REJECT,
- },
- {
- "invalid dst register in STX",
- .insns = {
- BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R14 is invalid",
- .result = REJECT,
- },
- {
- "invalid dst register in ST",
- .insns = {
- BPF_ST_MEM(BPF_B, 14, -1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R14 is invalid",
- .result = REJECT,
- },
- {
- "invalid src register in LDX",
- .insns = {
- BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R12 is invalid",
- .result = REJECT,
- },
- {
- "invalid dst register in LDX",
- .insns = {
- BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R11 is invalid",
- .result = REJECT,
- },
- {
- "junk insn",
- .insns = {
- BPF_RAW_INSN(0, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode 00",
- .result = REJECT,
- },
- {
- "junk insn2",
- .insns = {
- BPF_RAW_INSN(1, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_LDX uses reserved fields",
- .result = REJECT,
- },
- {
- "junk insn3",
- .insns = {
- BPF_RAW_INSN(-1, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode ff",
- .result = REJECT,
- },
- {
- "junk insn4",
- .insns = {
- BPF_RAW_INSN(-1, -1, -1, -1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode ff",
- .result = REJECT,
- },
- {
- "junk insn5",
- .insns = {
- BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_ALU uses reserved fields",
- .result = REJECT,
- },
- {
- "misaligned read from stack",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned stack access",
- .result = REJECT,
- },
- {
- "invalid map_fd for function call",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_delete_elem),
- BPF_EXIT_INSN(),
- },
- .errstr = "fd 0 is not pointing to valid bpf_map",
- .result = REJECT,
- },
- {
- "don't check return value before access",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 invalid mem access 'map_value_or_null'",
- .result = REJECT,
- },
- {
- "access memory with incorrect alignment",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "misaligned value access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "sometimes access memory with incorrect alignment",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 invalid mem access",
- .errstr_unpriv = "R0 leaks addr",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "jump test 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "jump test 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 14),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 11),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 5),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "jump test 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 19),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_JMP_IMM(BPF_JA, 0, 0, 15),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
- BPF_JMP_IMM(BPF_JA, 0, 0, 11),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
- BPF_JMP_IMM(BPF_JA, 0, 0, 7),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
- BPF_JMP_IMM(BPF_JA, 0, 0, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_delete_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 24 },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = -ENOENT,
- },
- {
- "jump test 4",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "jump test 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "access skb fields ok",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, queue_mapping)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, protocol)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_present)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_tci)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, napi_id)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "access skb fields bad1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "access skb fields bad2",
- .insns = {
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "different pointers",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "access skb fields bad3",
- .insns = {
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -12),
- },
- .fixup_map_hash_8b = { 6 },
- .errstr = "different pointers",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "access skb fields bad4",
- .insns = {
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -13),
- },
- .fixup_map_hash_8b = { 7 },
- .errstr = "different pointers",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff family",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, family)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff remote_ip4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip4)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff local_ip4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip4)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff remote_ip6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff local_ip6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff remote_port",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_port)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff remote_port",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_port)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "valid access __sk_buff family",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, family)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff remote_ip4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip4)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff local_ip4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip4)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff remote_ip6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[3])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff local_ip6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[3])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff remote_port",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_port)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff remote_port",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_port)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "invalid access of tc_classid for SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- .errstr = "invalid bpf_context access",
- },
- {
- "invalid access of skb->mark for SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- .errstr = "invalid bpf_context access",
- },
- {
- "check skb->mark is not writeable by SK_SKB",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- .errstr = "invalid bpf_context access",
- },
- {
- "check skb->tc_index is writeable by SK_SKB",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tc_index)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "check skb->priority is writeable by SK_SKB",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, priority)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "direct packet read for SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "direct packet write for SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "overlapping checks for direct packet access SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access family in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, family)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access remote_ip4 in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip4)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access local_ip4 in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip4)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access remote_port in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_port)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access local_port in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_port)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access remote_ip6 in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip6[3])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
-		.prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access local_ip6 in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip6[3])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
-		.prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access size in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, size)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "invalid 64B read of size in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, size)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "invalid read past end of SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, size) + 4),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "invalid read offset in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, family) + 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet read for SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, data)),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
- offsetof(struct sk_msg_md, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "direct packet write for SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, data)),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
- offsetof(struct sk_msg_md, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "overlapping checks for direct packet access SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, data)),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
- offsetof(struct sk_msg_md, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "check skb->mark is not writeable by sockets",
- .insns = {
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .errstr_unpriv = "R1 leaks addr",
- .result = REJECT,
- },
- {
- "check skb->tc_index is not writeable by sockets",
- .insns = {
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, tc_index)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .errstr_unpriv = "R1 leaks addr",
- .result = REJECT,
- },
- {
- "check cb access: byte",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 3),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1]) + 3),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2]) + 3),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3]) + 3),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4]) + 3),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "__sk_buff->hash, offset 0, byte store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, hash)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "__sk_buff->tc_index, offset 3, byte store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tc_index) + 3),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check skb->hash byte load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
-#else
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 3),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash byte load permitted 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash byte load permitted 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash byte load permitted 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 3),
-#else
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check cb access: byte, wrong type",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "check cb access: half",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1]) + 2),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2]) + 2),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3]) + 2),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check cb access: half, unaligned",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check __sk_buff->hash, offset 0, half store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, hash)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check __sk_buff->tc_index, offset 2, half store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tc_index) + 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check skb->hash half load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 2),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash half load permitted 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 2),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash half load not permitted, unaligned 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 1),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 3),
-#endif
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check skb->hash half load not permitted, unaligned 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 3),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 1),
-#endif
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "check cb access: half, wrong type",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "check cb access: word",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check cb access: word, unaligned 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: word, unaligned 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: word, unaligned 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: word, unaligned 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 3),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: double",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check cb access: double, unaligned 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1])),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: double, unaligned 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: double, oob 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check cb access: double, oob 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check __sk_buff->ifindex dw store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, ifindex)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check __sk_buff->ifindex dw load not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, ifindex)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check cb access: double, wrong type",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "check out of range skb->cb access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 256),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .errstr_unpriv = "",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_ACT,
- },
- {
- "write skb fields from socket prog",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_index)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .errstr_unpriv = "R1 leaks addr",
- .result_unpriv = REJECT,
- },
- {
- "write skb fields from tc_cls_act prog",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_index)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tc_index)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tstamp)),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tstamp)),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "PTR_TO_STACK store/load",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0xfaceb00c,
- },
- {
- "PTR_TO_STACK store/load - bad alignment on off",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
- },
- {
- "PTR_TO_STACK store/load - bad alignment on reg",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
- },
- {
- "PTR_TO_STACK store/load - out of bounds low",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off=-79992 size=8",
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- },
- {
- "PTR_TO_STACK store/load - out of bounds high",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off=0 size=8",
- },
- {
- "unpriv: return pointer",
- .insns = {
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 leaks addr",
- .retval = POINTER_VALUE,
- },
- {
- "unpriv: add const to pointer",
- .insns = {
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "unpriv: add pointer to pointer",
- .insns = {
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R1 pointer += pointer",
- },
- {
- "unpriv: neg pointer",
- .insns = {
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R1 pointer arithmetic",
- },
- {
- "unpriv: cmp pointer with const",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R1 pointer comparison",
- },
- {
- "unpriv: cmp pointer with pointer",
- .insns = {
- BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R10 pointer comparison",
- },
- {
- "unpriv: check that printk is disallowed",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_trace_printk),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "unknown func bpf_trace_printk#6",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: pass pointer to helper function",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_update_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr_unpriv = "R4 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: indirectly pass pointer on stack to helper function",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "invalid indirect read from stack off -8+0 size 8",
- .result = REJECT,
- },
- {
- "unpriv: mangle pointer on stack 1",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: mangle pointer on stack 2",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: read pointer from stack in small chunks",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid size",
- .result = REJECT,
- },
- {
- "unpriv: write pointer into ctx",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 leaks addr",
- .result_unpriv = REJECT,
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "unpriv: spill/fill of ctx",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "unpriv: spill/fill of ctx 2",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_hash_recalc),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of ctx 3",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
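-			/* overwrite the spilled ctx with the frame pointer */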
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_hash_recalc),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R1 type=fp expected=ctx",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of ctx 4",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
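-			/* XADD on the spill slot turns the saved ctx
-			 * pointer into a scalar
-			 */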
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
- BPF_REG_0, -8, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_hash_recalc),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R1 type=inv expected=ctx",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx",
- .insns = {
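-			/* spill either a stack pointer or the ctx pointer
-			 * into the same slot, then write through the fill
-			 */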
- BPF_MOV64_IMM(BPF_REG_3, 42),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
- offsetof(struct __sk_buff, mark)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "same insn cannot be used with different pointers",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx - ctx and sock",
- .insns = {
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- /* struct bpf_sock *sock = bpf_sock_lookup(...); */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- /* u64 foo; */
- /* void *target = &foo; */
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- /* if (skb == NULL) *target = sock; */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- /* else *target = skb; */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- /* struct __sk_buff *skb = *target; */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- /* skb->mark = 42; */
- BPF_MOV64_IMM(BPF_REG_3, 42),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
- offsetof(struct __sk_buff, mark)),
- /* if (sk) bpf_sk_release(sk) */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "type=ctx expected=sock",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx - leak sock",
- .insns = {
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- /* struct bpf_sock *sock = bpf_sock_lookup(...); */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- /* u64 foo; */
- /* void *target = &foo; */
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- /* if (skb == NULL) *target = sock; */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- /* else *target = skb; */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- /* struct __sk_buff *skb = *target; */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- /* skb->mark = 42; */
- BPF_MOV64_IMM(BPF_REG_3, 42),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- //.errstr = "same insn cannot be used with different pointers",
- .errstr = "Unreleased reference",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx - sock and ctx (read)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- /* struct bpf_sock *sock = bpf_sock_lookup(...); */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- /* u64 foo; */
- /* void *target = &foo; */
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- /* if (skb) *target = skb */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- /* else *target = sock */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- /* struct bpf_sock *sk = *target; */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct bpf_sock, mark)),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "same insn cannot be used with different pointers",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx - sock and ctx (write)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- /* struct bpf_sock *sock = bpf_sock_lookup(...); */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- /* u64 foo; */
- /* void *target = &foo; */
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- /* if (skb) *target = skb */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- /* else *target = sock */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- /* struct bpf_sock *sk = *target; */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- /* if (sk) sk->mark = 42; bpf_sk_release(sk); */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_MOV64_IMM(BPF_REG_3, 42),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
- offsetof(struct bpf_sock, mark)),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- //.errstr = "same insn cannot be used with different pointers",
- .errstr = "cannot write into socket",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers ldx",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
- -(__s32)offsetof(struct bpf_perf_event_data,
- sample_period) - 8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
- offsetof(struct bpf_perf_event_data,
- sample_period)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "same insn cannot be used with different pointers",
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "unpriv: write pointer into map elem value",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "alu32: mov u32 const",
- .insns = {
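-			/* R7 is provably zero, so the branch always
-			 * skips the load through it
-			 */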
- BPF_MOV32_IMM(BPF_REG_7, 0),
- BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
- BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "unpriv: partial copy of pointer",
- .insns = {
- BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R10 partial copy",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: pass pointer to tail_call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .errstr_unpriv = "R3 leaks addr into helper",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: cmp map pointer with zero",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: write into frame pointer",
- .insns = {
- BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "frame pointer is read only",
- .result = REJECT,
- },
- {
- "unpriv: spill/fill frame pointer",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "frame pointer is read only",
- .result = REJECT,
- },
- {
- "unpriv: cmp of frame pointer",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R10 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: adding of fp",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: cmp of stack pointer",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R2 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "runtime/jit: tail_call within bounds, prog once",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "runtime/jit: tail_call within bounds, prog loop",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 1),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 41,
- },
- {
- "runtime/jit: tail_call within bounds, no prog",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "runtime/jit: tail_call out of bounds",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 256),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "runtime/jit: pass negative index to tail_call",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, -1),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "runtime/jit: pass > 32bit index to tail_call",
- .insns = {
- BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
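-			/* the tail_call index is truncated to u32, i.e. 0 */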
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 2 },
- .result = ACCEPT,
- .retval = 42,
- /* Verifier rewrite for unpriv skips tail call here. */
- .retval_unpriv = 2,
- },
- {
- "PTR_TO_STACK check high 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check high 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check high 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check high 4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .errstr = "invalid stack off=0 size=1",
- .result = REJECT,
- },
- {
- "PTR_TO_STACK check high 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off",
- },
- {
- "PTR_TO_STACK check high 6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
- BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off",
- },
- {
- "PTR_TO_STACK check high 7",
- .insns = {
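-			/* two large adds push the fp offset past the
-			 * verifier's limit
-			 */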
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
- BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .errstr = "fp pointer offset",
- },
- {
- "PTR_TO_STACK check low 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -512),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check low 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 1, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1),
- BPF_EXIT_INSN(),
- },
- .result_unpriv = REJECT,
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check low 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .errstr = "invalid stack off=-513 size=1",
- .result = REJECT,
- },
- {
- "PTR_TO_STACK check low 4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, INT_MIN),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "math between fp pointer",
- },
- {
- "PTR_TO_STACK check low 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off",
- },
- {
- "PTR_TO_STACK check low 6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
- BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off",
- },
- {
- "PTR_TO_STACK check low 7",
- .insns = {
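-			/* two large negative adds push the fp offset
-			 * below the verifier's limit
-			 */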
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
- BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .errstr = "fp pointer offset",
- },
- {
- "PTR_TO_STACK mixed reg/k, 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
- BPF_MOV64_IMM(BPF_REG_2, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK mixed reg/k, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
- BPF_MOV64_IMM(BPF_REG_2, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_5, -6),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK mixed reg/k, 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
- BPF_MOV64_IMM(BPF_REG_2, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = -3,
- },
- {
- "PTR_TO_STACK reg",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_MOV64_IMM(BPF_REG_2, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result_unpriv = REJECT,
- .errstr_unpriv = "invalid stack off=0 size=1",
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "stack pointer arithmetic",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
- BPF_ST_MEM(0, BPF_REG_2, 4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
- BPF_ST_MEM(0, BPF_REG_2, 4, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "raw_stack: no skb_load_bytes",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- /* Call to skb_load_bytes() omitted. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid read from stack off -8+0 size 8",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, negative len",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, negative len 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, ~0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, zero len",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack type R3",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, no init",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, init",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
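-	/* A helper call clobbers the stack window it may write to: spilled
-	 * pointers inside the (R3, R4) range become scalars ('inv') and can
-	 * no longer be dereferenced, while spills outside the range survive.
-	 */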
- {
- "raw_stack: skb_load_bytes, spilled regs around bounds",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
- offsetof(struct __sk_buff, priority)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, spilled regs corruption",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "raw_stack: skb_load_bytes, spilled regs corruption 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
- offsetof(struct __sk_buff, priority)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R3 invalid mem access 'inv'",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "raw_stack: skb_load_bytes, spilled regs + data",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
- offsetof(struct __sk_buff, priority)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
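-	/* Boundary checks on the (buf, len) pair itself: the buffer must lie
-	 * fully inside the 512-byte stack frame (off=-513, or off=-1 with
-	 * size 8, fails), len may not be zero or unbounded, and fp-512 with
-	 * len 512 is the largest window that is still accepted.
-	 */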
- {
- "raw_stack: skb_load_bytes, invalid access 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack type R3 off=-513 access_size=8",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack type R3 off=-1 access_size=8",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 6",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack type R3 off=-512 access_size=0",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, large access",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 512),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
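-	/* Context structures may only be written with plain BPF_STX to
-	 * writable fields; immediate stores (BPF_ST) and atomic adds
-	 * (BPF_XADD) into ctx are rejected regardless of the field.
-	 */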
- {
- "context stores via ST",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_ST stores into R1 ctx is not allowed",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "context stores via XADD",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
- BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_XADD stores into R1 ctx is not allowed",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
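-	/* Direct packet access: load skb->data and skb->data_end from the
-	 * ctx, prove data + N <= data_end on some branch, and only then
-	 * read or write packet bytes.  The variants below probe how range
-	 * knowledge propagates through branches, ALU ops and stack spills.
-	 */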
- {
- "direct packet access: test1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
- BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access off=76",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- },
- {
- "direct packet access: test4 (write)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test5 (pkt_end >= reg, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test6 (pkt_end >= reg, bad access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid access to packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test7 (pkt_end >= reg, both accesses)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid access to packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test8 (double test, variant 1)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test9 (double test, variant 2)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test10 (write invalid)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid access to packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test11 (shift, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
- BPF_MOV64_IMM(BPF_REG_3, 144),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test12 (and, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
- BPF_MOV64_IMM(BPF_REG_3, 144),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test13 (branches, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_MOV64_IMM(BPF_REG_4, 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
- BPF_MOV64_IMM(BPF_REG_3, 14),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_3, 24),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
- BPF_MOV64_IMM(BPF_REG_5, 12),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test15 (spill with xadd)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
- BPF_MOV64_IMM(BPF_REG_5, 4096),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 invalid mem access 'inv'",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test16 (arith on data_end)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 pointer arithmetic on pkt_end",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test17 (pruning, alignment)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
- BPF_JMP_A(-6),
- },
- .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
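-	/* Commutativity checks: "scalar += pkt_ptr" must yield the same
-	 * tracked packet pointer as "pkt_ptr += scalar", including when
-	 * the scalar was first bounded via an AND mask or a right shift.
-	 */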
- {
- "direct packet access: test18 (imm += pkt_ptr, 1)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_IMM(BPF_REG_0, 8),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test19 (imm += pkt_ptr, 2)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- BPF_MOV64_IMM(BPF_REG_4, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
- BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test20 (x += pkt_ptr, 1)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test21 (x += pkt_ptr, 2)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
- BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test22 (x += pkt_ptr, 3)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
- BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test23 (x += pkt_ptr, 4)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test24 (x += pkt_ptr, 5)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 64),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
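-	/* JLT/JLE against pkt_end: range marking must land on the correct
-	 * branch.  Each "good access" test has a "bad access" twin that
-	 * performs the load on the unproven branch and must be rejected.
-	 */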
- {
- "direct packet access: test25 (marking on <, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -4),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test26 (marking on <, bad access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JA, 0, 0, -3),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test27 (marking on <=, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test28 (marking on <=, bad access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -4),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
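-	/* Helpers and packet pointers: a memory argument may be a packet
-	 * pointer only if the full (ptr, size) range was checked against
-	 * data_end, and only for helpers permitted to touch packet data
-	 * (bpf_csum_diff here); bpf_skb_{load,store}_bytes are not.
-	 */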
- {
- "helper access to packet: test1, valid packet_ptr range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_update_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 5 },
- .result_unpriv = ACCEPT,
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test2, unchecked packet_ptr",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test3, variable add",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
- BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 11 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test4, packet_ptr with bad range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 7 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test5, packet_ptr with too short range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 6 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test6, cls valid packet_ptr range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_update_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 5 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test7, cls unchecked packet_ptr",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test8, cls variable add",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
- BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 11 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test9, cls packet_ptr with bad range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 7 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test10, cls packet_ptr with too short range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 6 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test11, cls unsuitable helper 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_4, 42),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_store_bytes),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "helper access to the packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test12, cls unsuitable helper 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_4, 4),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "helper access to the packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test13, cls helper ok",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test14, cls helper ok sub",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test15, cls helper fail sub",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test16, cls helper fail range 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test17, cls helper fail range 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, -9),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R2 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test18, cls helper fail range 3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, ~0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R2 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test19, cls helper range zero",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test20, pkt end as input",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R1 type=pkt_end expected=fp",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test21, wrong reg",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
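-	/* Special-purpose map types (prog array 3, stack trace 7,
-	 * sockmap 15, xskmap 17, sockhash 18) do not support
-	 * map_lookup_elem() from these program types; the numbers in the
-	 * messages are the enum bpf_map_type values.
-	 */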
- {
- "prevent map lookup in sockmap",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_sockmap = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
- .prog_type = BPF_PROG_TYPE_SOCK_OPS,
- },
- {
- "prevent map lookup in sockhash",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_sockhash = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
- .prog_type = BPF_PROG_TYPE_SOCK_OPS,
- },
- {
- "prevent map lookup in xskmap",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_xskmap = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "prevent map lookup in stack trace",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_stacktrace = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "prevent map lookup in prog array",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_prog2 = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
- },
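-	/* Map value (array) accesses: every index, whether a constant, a
-	 * register, or a value loaded from the map itself, must be proven
-	 * to keep the access inside value_size (48 bytes for test_val);
-	 * missing floor checks and off-by-one ceilings must be caught.
-	 */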
- {
- "valid map access into an array with a constant",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "valid map access into an array with a register",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "valid map access into an array with a variable",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "valid map access into an array with a signed variable",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid map access into an array with a constant",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=48 size=8",
- .result = REJECT,
- },
- {
- "invalid map access into an array with a register",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 min value is outside of the array range",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid map access into an array with a variable",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid map access into an array with no floor check",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .errstr = "R0 unbounded memory access",
- .result_unpriv = REJECT,
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
-		"invalid map access into an array with an invalid max check",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .errstr = "invalid access to map value, value_size=48 off=44 size=8",
- .result_unpriv = REJECT,
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
-		"invalid map access into an array with an invalid max check",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3, 11 },
- .errstr = "R0 pointer += pointer",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
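-	/* CGROUP_SKB gets a restricted __sk_buff: most read-only fields and
-	 * the cb[] scratch area are accessible, but tc_classid, data_meta
-	 * and flow_keys are not, and napi_id may be read but not written.
-	 */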
- {
- "direct packet read test#1 for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, queue_mapping)),
- BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
- offsetof(struct __sk_buff, protocol)),
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_present)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "invalid bpf_context access off=76 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "direct packet read test#2 for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_tci)),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_proto)),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, priority)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
- offsetof(struct __sk_buff, priority)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff,
- ingress_ifindex)),
- BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
- offsetof(struct __sk_buff, tc_index)),
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "direct packet read test#3 for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, napi_id)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
- offsetof(struct __sk_buff, cb[1])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
- offsetof(struct __sk_buff, cb[2])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
- offsetof(struct __sk_buff, cb[3])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
- offsetof(struct __sk_buff, cb[4])),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "direct packet read test#4 for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, family)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip4)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip4)),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[3])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[3])),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, remote_port)),
- BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
- offsetof(struct __sk_buff, local_port)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid access of tc_classid for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid access of data_meta for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data_meta)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid access of flow_keys for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, flow_keys)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid write access to napi_id for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, napi_id)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
- offsetof(struct __sk_buff, napi_id)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "valid cgroup storage access",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .result = REJECT,
- .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "fd 1 is not pointing to valid bpf_map",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=64 off=256 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid cgroup storage access 5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 7),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "get_local_storage() doesn't support non-zero flags",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "get_local_storage() doesn't support non-zero flags",
- .errstr_unpriv = "R2 leaks addr into helper function",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "valid per-cpu cgroup storage access",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_percpu_cgroup_storage = { 1 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .result = REJECT,
- .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "fd 1 is not pointing to valid bpf_map",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_percpu_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=64 off=256 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid per-cpu cgroup storage access 5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 7),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_percpu_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "get_local_storage() doesn't support non-zero flags",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_percpu_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "get_local_storage() doesn't support non-zero flags",
- .errstr_unpriv = "R2 leaks addr into helper function",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "write tstamp from CGROUP_SKB",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tstamp)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "invalid bpf_context access off=152 size=8",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "read tstamp from CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tstamp)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "multiple registers share map_lookup_elem result",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "alu ops on ptr_to_map_value_or_null, 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "R4 pointer arithmetic on map_value_or_null",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "alu ops on ptr_to_map_value_or_null, 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "R4 pointer arithmetic on map_value_or_null",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "alu ops on ptr_to_map_value_or_null, 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "R4 pointer arithmetic on map_value_or_null",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "invalid memory access with multiple map_lookup_elem calls",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .result = REJECT,
- .errstr = "R4 !read_ok",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "valid indirect map_lookup_elem access with 2nd lookup in branch",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_2, 10),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "invalid map access from else condition",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 unbounded memory access",
- .result = REJECT,
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "constant register |= constant should keep constant type",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "constant register |= constant should not bypass stack boundary checks",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-48 access_size=58",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "constant register |= constant register should keep constant type",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_MOV64_IMM(BPF_REG_4, 13),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "constant register |= constant register should not bypass stack boundary checks",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_MOV64_IMM(BPF_REG_4, 24),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-48 access_size=58",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "invalid direct packet write for LWT_IN",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "cannot write into packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "invalid direct packet write for LWT_OUT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "cannot write into packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_OUT,
- },
- {
- "direct packet write for LWT_XMIT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
- },
- {
- "direct packet read for LWT_IN",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "direct packet read for LWT_OUT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_OUT,
- },
- {
- "direct packet read for LWT_XMIT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
- },
- {
- "overlapping checks for direct packet access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
- },
- {
- "make headroom for LWT_XMIT",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
- /* split for s390 to succeed */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, 42),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
- },
- {
- "invalid access of tc_classid for LWT_IN",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- },
- {
- "invalid access of tc_classid for LWT_OUT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- },
- {
- "invalid access of tc_classid for LWT_XMIT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- },
- {
- "leak pointer into ctx 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 2 },
- .errstr_unpriv = "R2 leaks addr into mem",
- .result_unpriv = REJECT,
- .result = REJECT,
- .errstr = "BPF_XADD stores into R1 ctx is not allowed",
- },
- {
- "leak pointer into ctx 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R10 leaks addr into mem",
- .result_unpriv = REJECT,
- .result = REJECT,
- .errstr = "BPF_XADD stores into R1 ctx is not allowed",
- },
- {
- "leak pointer into ctx 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .errstr_unpriv = "R2 leaks addr into ctx",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "leak pointer into map val",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr_unpriv = "R6 leaks addr into mem",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "helper access to map: full range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: partial range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: empty range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=0 size=0",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: out-of-bound range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=0 size=56",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: negative range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, -8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): full range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): partial range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): empty range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=4 size=0",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): out-of-bound range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo) + 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=4 size=52",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): negative range (> adjustment)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2, -8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): negative range (< adjustment)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): full range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): partial range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): empty range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R1 min value is outside of the array range",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): out-of-bound range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo) + 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=4 size=52",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): negative range (> adjustment)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, -8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): negative range (< adjustment)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): full range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct test_val, foo), 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): partial range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct test_val, foo), 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): empty range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct test_val, foo), 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R1 min value is outside of the array range",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): no max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R1 unbounded memory access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): wrong max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct test_val, foo), 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo) + 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=4 size=45",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using <, good access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using <, bad access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 unbounded memory access",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using <=, good access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using <=, bad access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 unbounded memory access",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<, good access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<, good access 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<, bad access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 min value is negative",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<=, good access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<=, good access 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<=, bad access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 min value is negative",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map access: known scalar += value_ptr from different maps",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 5 },
- .fixup_map_array_48b = { 8 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R1 tried to add from different maps",
- .retval = 1,
- },
- {
- "map access: value_ptr -= known scalar from different maps",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 5 },
- .fixup_map_array_48b = { 8 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 min value is outside of the array range",
- .retval = 1,
- },
- {
- "map access: known scalar += value_ptr from different maps, but same value properties",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 5 },
- .fixup_map_array_48b = { 8 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: mixing value pointer and scalar, 1",
- .insns = {
- // load map value pointer into r0 and r2
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
- BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
- BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- // load some number from the map into r1
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- // depending on r1, branch:
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3),
- // branch A
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_JMP_A(2),
- // branch B
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0x100000),
- // common instruction
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- // depending on r1, branch:
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- // branch A
- BPF_JMP_A(4),
- // branch B
- BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
- // verifier follows fall-through
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- // fake-dead code; targeted from branch A to
- // prevent dead code sanitization
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 1 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R2 tried to add from different pointers or scalars",
- .retval = 0,
- },
- {
- "map access: mixing value pointer and scalar, 2",
- .insns = {
- // load map value pointer into r0 and r2
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
- BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
- BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- // load some number from the map into r1
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- // depending on r1, branch:
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- // branch A
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0x100000),
- BPF_JMP_A(2),
- // branch B
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- // common instruction
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- // depending on r1, branch:
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- // branch A
- BPF_JMP_A(4),
- // branch B
- BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
- // verifier follows fall-through
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- // fake-dead code; targeted from branch A to
- // prevent dead code sanitization
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 1 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R2 tried to add from different maps or paths",
- .retval = 0,
- },
- {
- "sanitation: alu with different scalars",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
- BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
- BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0x100000),
- BPF_JMP_A(2),
- BPF_MOV64_IMM(BPF_REG_2, 42),
- BPF_MOV64_IMM(BPF_REG_3, 0x100001),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 1 },
- .result = ACCEPT,
- .retval = 0x100000,
- },
- {
- "map access: value_ptr += known scalar, upper oob arith, test 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 48),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, upper oob arith, test 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 49),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, upper oob arith, test 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr -= known scalar, lower oob arith, test 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 48),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 min value is outside of the array range",
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- },
- {
- "map access: value_ptr -= known scalar, lower oob arith, test 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 48),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr -= known scalar, lower oob arith, test 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: known scalar += value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 49),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "invalid access to map value",
- },
- {
- "map access: value_ptr += known scalar, 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "invalid access to map value",
- },
- {
- "map access: value_ptr += known scalar, 4",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_IMM(BPF_REG_1, 5),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, -2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, 5",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, (6 + 1) * sizeof(int)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 0xabcdef12,
- },
- {
- "map access: value_ptr += known scalar, 6",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_1, (3 + 1) * sizeof(int)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 3 * sizeof(int)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 0xabcdef12,
- },
- {
- "map access: unknown scalar += value_ptr, 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: unknown scalar += value_ptr, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 0xabcdef12,
- },
- {
- "map access: unknown scalar += value_ptr, 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 0xabcdef12,
- },
- {
- "map access: unknown scalar += value_ptr, 4",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_1, 19),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 max value is outside of the array range",
- .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range",
- },
- {
- "map access: value_ptr += unknown scalar, 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: value_ptr += unknown scalar, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 0xabcdef12,
- },
- {
- "map access: value_ptr += unknown scalar, 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 16),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 1),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_JMP_IMM(BPF_JA, 0, 0, -3),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: value_ptr += value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 pointer += pointer prohibited",
- },
- {
- "map access: known scalar -= value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 tried to subtract pointer from scalar",
- },
- {
- "map access: value_ptr -= known scalar",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 min value is outside of the array range",
- },
- {
- "map access: value_ptr -= known scalar, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_1, 6),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: unknown scalar -= value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 tried to subtract pointer from scalar",
- },
- {
- "map access: value_ptr -= unknown scalar",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 min value is negative",
- },
- {
- "map access: value_ptr -= unknown scalar, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr -= value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
- .errstr_unpriv = "R0 pointer -= pointer prohibited",
- },
- {
- "map lookup helper access to map",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 8 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map update helper access to map",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map update helper access to map: wrong size",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .fixup_map_hash_16b = { 10 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=8 off=0 size=16",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const imm)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
- offsetof(struct other_val, bar)),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 9 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const imm): out-of-bound 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
- sizeof(struct other_val) - 4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 9 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=12 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const imm): out-of-bound 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 9 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const reg)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct other_val, bar)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const reg): out-of-bound 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- sizeof(struct other_val) - 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=12 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const reg): out-of-bound 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, -4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via variable)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct other_val, bar), 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 11 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via variable): no max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = REJECT,
- .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via variable): wrong max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct other_val, bar) + 1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 11 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=9 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map element value is preserved across register spilling",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- },
- {
- "map element value or null is marked on register spilling",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- },
- {
- "map element value store of cleared call register",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R1 !read_ok",
- .errstr = "R1 !read_ok",
- .result = REJECT,
- .result_unpriv = REJECT,
- },
- {
- "map element value with unaligned store",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
- BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
- BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map element value with unaligned load",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map element value illegal alu op, 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 bitwise operator &= on pointer",
- .result = REJECT,
- },
- {
- "map element value illegal alu op, 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 32-bit pointer arithmetic prohibited",
- .result = REJECT,
- },
- {
- "map element value illegal alu op, 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 pointer arithmetic with /= operator",
- .result = REJECT,
- },
- {
- "map element value illegal alu op, 4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 pointer arithmetic prohibited",
- .errstr = "invalid mem access 'inv'",
- .result = REJECT,
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map element value illegal alu op, 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_IMM(BPF_REG_3, 4096),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 invalid mem access 'inv'",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map element value is preserved across register spilling",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
- offsetof(struct test_val, foo)),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, bitwise AND, zero included",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid indirect read from stack off -64+0 size 64",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-64 access_size=65",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP (signed), correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, bounds + offset",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-64 access_size=65",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, wrong max",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-64 access_size=65",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, no max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- /* because max wasn't checked, signed min is negative */
- .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, no min check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid indirect read from stack off -64+0 size 64",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP (signed), no min check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: map, JMP, correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
- sizeof(struct test_val), 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: map, JMP, wrong max",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
- sizeof(struct test_val) + 1, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=0 size=49",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: map adjusted, JMP, correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
- sizeof(struct test_val) - 20, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: map adjusted, JMP, wrong max",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
- sizeof(struct test_val) - 19, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R1 min value is outside of the array range",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 type=inv expected=fp",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 0 /* csum_diff of 64-byte packet */,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 type=inv expected=fp",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 type=inv expected=fp",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: 8 bytes leak",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid indirect read from stack off -64+32 size 64",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: 8 bytes no leak (init memory)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "invalid and of negative number",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 max value is outside of the array range",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid range check",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_9, 1),
- BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
- BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
- BPF_MOV32_IMM(BPF_REG_3, 1),
- BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
- BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
- BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
- BPF_MOV64_REG(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 max value is outside of the array range",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map in map access",
- .insns = {
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_in_map = { 3 },
- .result = ACCEPT,
- },
- {
- "invalid inner map pointer",
- .insns = {
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_in_map = { 3 },
- .errstr = "R1 pointer arithmetic on map_ptr prohibited",
- .result = REJECT,
- },
- {
- "forgot null checking on the inner map pointer",
- .insns = {
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_in_map = { 3 },
- .errstr = "R1 type=map_value_or_null expected=map_ptr",
- .result = REJECT,
- },
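/*
 * The three map-in-map cases above encode one pattern: look up the inner
 * map, null-check the result, and do no arithmetic on the map pointer in
 * between.  A BPF C sketch of the accepted shape (map names, inner value
 * type and section are illustrative, not from this patch):
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct inner {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 1);
	__type(key, __u32);
	__array(values, struct inner);
} outer_map SEC(".maps");

SEC("socket")
int map_in_map(void *ctx)
{
	__u32 key = 0;
	void *inner = bpf_map_lookup_elem(&outer_map, &key);

	if (!inner)	/* skipping this is the map_value_or_null REJECT */
		return 0;
	/* adding an offset to 'inner' here is the map_ptr REJECT */
	bpf_map_lookup_elem(inner, &key);
	return 0;
}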
- {
- "ld_abs: check calling conv, r1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
- BPF_EXIT_INSN(),
- },
- .errstr = "R4 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .errstr = "R5 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r7",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_7, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "ld_abs: tests on r6 and skb data reload helper",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_6, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_vlan_push),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 42 /* ultimate return value */,
- },
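/*
 * The ld_abs group pins down the legacy calling convention: the skb lives
 * in r6, the result lands in r0, and r1-r5 are scratched, which is why
 * reading them afterwards is "!read_ok" while r7 survives.  Roughly, as a
 * C model of the BPF_W form (types illustrative):
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct pkt { const uint8_t *data; uint32_t len; };

/* BPF_LD_ABS | BPF_W: r0 = ntohl(*(u32 *)(data + off)), else bail out */
int64_t ld_abs_w(const struct pkt *skb, int64_t off)
{
	uint32_t v;

	if (off < 0 || (uint64_t)off + sizeof(v) > skb->len)
		return -1;	/* out of packet: the program exits */
	memcpy(&v, skb->data + off, sizeof(v));
	return ntohl(v);
}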
- {
- "ld_ind: check calling conv, r1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_3, 1),
- BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_4, 1),
- BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
- BPF_EXIT_INSN(),
- },
- .errstr = "R4 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .errstr = "R5 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r7",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_7, 1),
- BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "check bpf_perf_event_data->sample_period byte load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period)),
-#else
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period) + 7),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "check bpf_perf_event_data->sample_period half load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period)),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period) + 6),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "check bpf_perf_event_data->sample_period word load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period)),
-#else
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period) + 4),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "check bpf_perf_event_data->sample_period dword load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
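/*
 * The sample_period cases accept narrow loads only at the offsets that
 * select the low-order bytes of the u64 on the build's endianness, hence
 * the +7/+6/+4 in the big-endian branches.  The offset arithmetic:
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t sample_period = 0x1122334455667788ULL;
	uint8_t b[8];

	memcpy(b, &sample_period, sizeof(b));
	/* little endian: b[0] holds 0x88; big endian: b[7] does */
	printf("b[0] = %#x, b[7] = %#x\n", b[0], b[7]);
	return 0;
}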
- {
- "check skb->data half load not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data) + 2),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- },
- {
- "check skb->tc_classid half load not permitted for lwt prog",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid) + 2),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "bounds checks mixing signed and unsigned, positive bounds",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 2),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
- BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
- BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 4",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 5",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 6",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_6, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R4 min value is negative, either use unsigned",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 7",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 8",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 9",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 10",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 11",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
- /* Dead branch. */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 12",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -6),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 13",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 2),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_7, 1),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R7 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 14",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_MOV64_IMM(BPF_REG_8, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
- BPF_JMP_IMM(BPF_JA, 0, 0, -7),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 15",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -6),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- .result_unpriv = REJECT,
- },
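/*
 * Common thread in the mixed signed/unsigned variants above: one unsigned
 * and one signed test against different bounds never pin the minimum of an
 * unknown value, because a large unsigned value is a negative signed one.
 * Concretely, a value that slips through both checks:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t v = -8;		/* stands in for the unknown stack value */

	/* JGT (unsigned) against -1: (u64)-8 < (u64)-1, no early exit */
	printf("v > -1 unsigned: %d\n", (uint64_t)v > (uint64_t)-1);
	/* JSGT (signed) against 1: -8 > 1 is false, no early exit either */
	printf("v > 1  signed:   %d\n", v > 1);
	/* so v reaches the pointer add still holding -8 */
	printf("v = %lld\n", (long long)v);
	return 0;
}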
- {
- "subtraction bounds (map value) variant 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
- BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 max value is outside of the array range",
- .result = REJECT,
- },
- {
- "subtraction bounds (map value) variant 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
- BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
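/*
 * In the two subtraction cases, each byte load is bounded to [0, 0xff],
 * but their difference spans [-0xff, 0xff]; the possible sign is what
 * makes the resulting index unusable:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t r1 = 0x00, r3 = 0xff;	/* both in [0, 0xff] */

	r1 -= r3;			/* worst case: -0xff */
	printf("%lld -> %#llx\n", (long long)r1,
	       (unsigned long long)((uint64_t)r1 >> 56));
	return 0;	/* RSH 56 of the negative case: 0xff, still too
			 * large an index for an 8-byte map value */
}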
- {
- "check subtraction on pointers for unpriv",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
- BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0),
- BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
- BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1, 9 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R9 pointer -= pointer prohibited",
- },
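/*
 * Why "pointer -= pointer" is unpriv-prohibited: the resulting scalar is
 * the distance between two kernel objects, i.e. address information that
 * could then be stored to a map, as the test above attempts.  A loose
 * userspace analogue of the leak:
 */
#include <stdio.h>

int main(void)
{
	int a, b;

	/* the byte distance between two objects exposes their layout */
	printf("%td\n", (char *)&a - (char *)&b);
	return 0;
}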
- {
- "bounds check based on zero-extended MOV",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- /* r2 = 0x0000'0000'ffff'ffff */
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
- /* r2 = 0 */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
- /* no-op */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- /* access at offset 0 */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT
- },
- {
- "bounds check based on sign-extended MOV. test1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- /* r2 = 0xffff'ffff'ffff'ffff */
- BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
- /* r2 = 0xffff'ffff */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
- /* r0 = <oob pointer> */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- /* access to OOB pointer */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "map_value pointer and 4294967295",
- .result = REJECT
- },
- {
- "bounds check based on sign-extended MOV. test2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- /* r2 = 0xffff'ffff'ffff'ffff */
- BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
- /* r2 = 0xfff'ffff */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
- /* r0 = <oob pointer> */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- /* access to OOB pointer */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 min value is outside of the array range",
- .result = REJECT
- },
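/*
 * The zero-/sign-extension trio above reduces to this C arithmetic: MOV32
 * zero-extends into the upper half, MOV64 of a 32-bit immediate
 * sign-extends, and the shifts reproduce the offsets in the error strings:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mov32 = (uint32_t)0xffffffff;		/* zero-extended */
	uint64_t mov64 = (uint64_t)(int32_t)0xffffffff;	/* sign-extended */

	printf("mov32 >> 32 = %#llx\n", (unsigned long long)(mov32 >> 32));
	printf("mov64 >> 32 = %#llx\n", (unsigned long long)(mov64 >> 32));
	printf("mov64 >> 36 = %#llx\n", (unsigned long long)(mov64 >> 36));
	return 0;	/* 0 (ACCEPT), then 0xffffffff and 0xfffffff, the
			 * two rejected out-of-range offsets */
}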
- {
- "bounds check based on reg_off + var_off + insn_off. test1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "value_size=8 off=1073741825",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "bounds check based on reg_off + var_off + insn_off. test2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "value 1073741823",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "bounds check after truncation of non-boundary-crossing range",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- /* r1 = [0x00, 0xff] */
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- /* r2 = 0x10'0000'0000 */
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
- /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
- /* r1 = [0x00, 0xff] */
- BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
- /* r1 = 0 */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* no-op */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* access at offset 0 */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT
- },
- {
- "bounds check after truncation of boundary-crossing range (1)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- /* r1 = [0x00, 0xff] */
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0xffff'ff80, 0x1'0000'007f] */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0xffff'ff80, 0xffff'ffff] or
- * [0x0000'0000, 0x0000'007f]
- */
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0x00, 0xff] or
- * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
- */
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = 0 or
- * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
- */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* no-op or OOB pointer computation */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* potentially OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- /* not actually fully unbounded, but the bound is very high */
- .errstr = "R0 unbounded memory access",
- .result = REJECT
- },
- {
- "bounds check after truncation of boundary-crossing range (2)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- /* r1 = [0x00, 0xff] */
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0xffff'ff80, 0x1'0000'007f] */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0xffff'ff80, 0xffff'ffff] or
- * [0x0000'0000, 0x0000'007f]
- * difference to previous test: truncation via MOV32
- * instead of ALU32.
- */
- BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0x00, 0xff] or
- * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
- */
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = 0 or
- * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
- */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* no-op or OOB pointer computation */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* potentially OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- /* not actually fully unbounded, but the bound is very high */
- .errstr = "R0 unbounded memory access",
- .result = REJECT
- },
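/*
 * The two boundary-crossing cases hinge on 32-bit ops discarding the upper
 * word: truncating an interval that straddles 2^32 splits it in two, so no
 * single [min, max] soundly describes the result.  The endpoints:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* r1 in [0xffff'ff80, 0x1'0000'007f] before the 32-bit ALU op */
	uint64_t lo = 0xffffff80ULL;
	uint64_t hi = 0x10000007fULL;

	printf("lo -> %#llx\n", (unsigned long long)(uint32_t)lo);
	printf("hi -> %#llx\n", (unsigned long long)(uint32_t)hi);
	/* 0xffffff80 and 0x7f: the range wrapped, hence the very high
	 * bound the verifier has to report as unbounded */
	return 0;
}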
- {
- "bounds check after wrapping 32-bit addition",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- /* r1 = 0x7fff'ffff */
- BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
- /* r1 = 0xffff'fffe */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
- /* r1 = 0 */
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
- /* no-op */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* access at offset 0 */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT
- },
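/*
 * The wrapping-addition case above, by contrast, is plain 32-bit modular
 * arithmetic on known constants, which the verifier tracks exactly:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t r1 = 0x7fffffffu + 0x7fffffffu;	/* 0xfffffffe */

	r1 += 2;					/* wraps to 0 */
	printf("%u\n", r1);
	return 0;
}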
- {
- "bounds check after shift with oversized count operand",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_2, 32),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- /* r1 = (u32)1 << (u32)32 = ? */
- BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
- /* r1 = [0x0000, 0xffff] */
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
- /* computes unknown pointer, potentially OOB */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* potentially OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 max value is outside of the array range",
- .result = REJECT
- },
- {
- "bounds check after right shift of maybe-negative number",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- /* r1 = [0x00, 0xff] */
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- /* r1 = [-0x01, 0xfe] */
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
- /* r1 = 0 or 0xff'ffff'ffff'ffff */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* r1 = 0 or 0xffff'ffff'ffff */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* computes unknown pointer, potentially OOB */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* potentially OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 unbounded memory access",
- .result = REJECT
- },
- {
- "bounds check after 32-bit right shift with 64-bit input",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- /* r1 = 2 */
- BPF_MOV64_IMM(BPF_REG_1, 2),
- /* r1 = 1<<32 */
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31),
- /* r1 = 0 (NOT 2!) */
- BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31),
- /* r1 = 0xffff'fffe (NOT 0!) */
- BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2),
- /* computes OOB pointer */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 invalid mem access",
- .result = REJECT,
- },
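/*
 * In the shift cases above, a count >= the register width leaves the
 * result undefined (hence unknown to the verifier), and a 32-bit shift
 * only ever sees the low word of a 64-bit value.  The arithmetic of the
 * last case:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t r1 = 2ULL << 31;		/* 1 << 32 */
	uint32_t w = (uint32_t)r1 >> 31;	/* 32-bit RSH sees 0, not 2 */

	printf("%u -> %#x\n", w, w - 2);	/* 0 -> 0xfffffffe */
	return 0;
}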
- {
- "bounds check map access with off+size signed 32bit overflow. test1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "map_value pointer and 2147483646",
- .result = REJECT
- },
- {
- "bounds check map access with off+size signed 32bit overflow. test2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "pointer offset 1073741822",
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .result = REJECT
- },
- {
- "bounds check map access with off+size signed 32bit overflow. test3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "pointer offset -1073741822",
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .result = REJECT
- },
- {
- "bounds check map access with off+size signed 32bit overflow. test4",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_1, 1000000),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "map_value pointer and 1000000000000",
- .result = REJECT
- },
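/*
 * The off+size overflow quartet all push a map-value pointer past what a
 * signed 32-bit offset can represent; the magnitudes in the error strings
 * fall straight out of the arithmetic:
 */
#include <stdio.h>

int main(void)
{
	printf("%lld\n", (long long)0x7ffffffe);	/* 2147483646, test1 */
	printf("%lld\n", 2 * 0x1fffffffLL);		/* 1073741822, tests 2/3 */
	printf("%lld\n", 1000000LL * 1000000);		/* 10^12, test4 */
	return 0;
}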
- {
- "pointer/scalar confusion in state equality check (way 1)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_JMP_A(1),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 leaks addr as return value"
- },
- {
- "pointer/scalar confusion in state equality check (way 2)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
- BPF_JMP_A(1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 leaks addr as return value"
- },
- {
- "variable-offset ctx access",
- .insns = {
- /* Get an unknown value */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- /* Make it small and 4-byte aligned */
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
- /* add it to skb. We now have either &skb->len or
- * &skb->pkt_type, but we don't know which
- */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- /* dereference it */
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "variable ctx access var_off=(0x0; 0x4)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "variable-offset stack access",
- .insns = {
- /* Fill the top 8 bytes of the stack */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- /* Get an unknown value */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- /* Make it small and 4-byte aligned */
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
- /* add it to fp. We now have either fp-4 or fp-8, but
- * we don't know which
- */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
- /* dereference it */
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "indirect variable-offset stack access",
- .insns = {
- /* Fill the top 8 bytes of the stack */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- /* Get an unknown value */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- /* Make it small and 4-byte aligned */
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
- /* add it to fp. We now have either fp-4 or fp-8, but
- * we don't know which
- */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
- /* dereference it indirectly */
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 5 },
- .errstr = "variable stack read R2",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
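/*
 * What the three variable-offset cases forbid, in BPF C terms: at this
 * point the verifier insists on exactly-known offsets into ctx and stack,
 * even when every possible offset would be in bounds.  A sketch of the
 * rejected shape (section and field choice illustrative):
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("lwt_in")
int var_off(struct __sk_buff *skb)
{
	char buf[8] = {};
	__u32 off = skb->len & 4;	/* unknown to the verifier: 0 or 4 */

	return buf[off];		/* variable stack access -> REJECT */
}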
- {
- "direct stack access with 32-bit wraparound. test1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_EXIT_INSN()
- },
- .errstr = "fp pointer and 2147483647",
- .result = REJECT
- },
- {
- "direct stack access with 32-bit wraparound. test2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_EXIT_INSN()
- },
- .errstr = "fp pointer and 1073741823",
- .result = REJECT
- },
- {
- "direct stack access with 32-bit wraparound. test3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_EXIT_INSN()
- },
- .errstr = "fp pointer offset 1073741822",
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .result = REJECT
- },
- {
- "liveness pruning and write screening",
- .insns = {
- /* Get an unknown value */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- /* branch conditions teach us nothing about R2 */
- BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 !read_ok",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "varlen_map_value_access pruning",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .errstr = "R0 unbounded memory access",
- .result_unpriv = REJECT,
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid 64-bit BPF_END",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 0),
- {
- .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
- .dst_reg = BPF_REG_0,
- .src_reg = 0,
- .off = 0,
- .imm = 32,
- },
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode d7",
- .result = REJECT,
- },
- {
- "XDP, using ifindex from netdev",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, ingress_ifindex)),
- BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .retval = 1,
- },
- {
- "meta access, test1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet, off=-8",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test5",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
- BPF_MOV64_IMM(BPF_REG_2, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_xdp_adjust_meta),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R3 !read_ok",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test7",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test8",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test9",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test10",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_IMM(BPF_REG_5, 42),
- BPF_MOV64_IMM(BPF_REG_6, 24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test11",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_IMM(BPF_REG_5, 42),
- BPF_MOV64_IMM(BPF_REG_6, 24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test12",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
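/*
 * The meta access family checks one ordering invariant, data_meta <= data
 * <= data_end, and that only a comparison against data makes metadata
 * loads safe: bounding against data_end (tests 3 and 4) proves nothing
 * about the meta area.  The canonical C pattern, as a sketch:
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int read_meta(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *meta = (void *)(long)ctx->data_meta;

	if (meta + 8 > data)	/* bound meta against data, not data_end */
		return XDP_PASS;
	if (*(__u8 *)meta)	/* provably inside the metadata area */
		return XDP_DROP;
	return XDP_PASS;
}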
- {
- "arithmetic ops make PTR_TO_CTX unusable",
- .insns = {
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct __sk_buff, data) -
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .errstr = "dereference of modified ctx ptr",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "pkt_end - pkt_start is allowed",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = TEST_DATA_LEN,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "XDP pkt read, pkt_end mangling, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 pointer arithmetic on pkt_end",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "XDP pkt read, pkt_end mangling, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 pointer arithmetic on pkt_end",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "XDP pkt read, pkt_data' > pkt_end, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' > pkt_end, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' > pkt_end, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end > pkt_data', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end > pkt_data', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end > pkt_data', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' < pkt_end, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' < pkt_end, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' < pkt_end, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end < pkt_data', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end < pkt_data', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end < pkt_data', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' >= pkt_end, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end >= pkt_data', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end >= pkt_data', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end >= pkt_data', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' <= pkt_end, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end <= pkt_data', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end <= pkt_data', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end <= pkt_data', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' > pkt_data, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data > pkt_meta', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data > pkt_meta', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data > pkt_meta', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' < pkt_data, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data < pkt_meta', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data < pkt_meta', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data < pkt_meta', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' >= pkt_data, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data >= pkt_meta', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' <= pkt_data, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data <= pkt_meta', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "check deducing bounds from const, 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "check deducing bounds from const, 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check deducing bounds from const, 5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 6",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 7",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, ~0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "dereference of modified ctx ptr",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "check deducing bounds from const, 8",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, ~0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "dereference of modified ctx ptr",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "check deducing bounds from const, 9",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 10",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
- /* Marks reg as unknown. */
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
- },
- {
- "bpf_exit with invalid return code. test1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 has value (0x0; 0xffffffff)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 has value (0x0; 0x3)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 has value (0x2; 0x0)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 is not a known value (ctx)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test7",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 has unknown scalar value",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "calls: basic sanity",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .result = ACCEPT,
- },
- {
- "calls: not on unpriviledged",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "calls: div by 0 in subprog",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV32_IMM(BPF_REG_2, 0),
- BPF_MOV32_IMM(BPF_REG_3, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "calls: multiple ret types in subprog 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
- },
- {
- "calls: multiple ret types in subprog 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
- offsetof(struct __sk_buff, data)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 16 },
- .result = REJECT,
- .errstr = "R0 min value is outside of the array range",
- },
- {
- "calls: overlapping caller/callee",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "last insn is not an exit or jmp",
- .result = REJECT,
- },
- {
- "calls: wrong recursive calls",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, 4),
- BPF_JMP_IMM(BPF_JA, 0, 0, 4),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "calls: wrong src reg",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "BPF_CALL uses reserved fields",
- .result = REJECT,
- },
- {
- "calls: wrong off value",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "BPF_CALL uses reserved fields",
- .result = REJECT,
- },
- {
- "calls: jump back loop",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn 0 to 0",
- .result = REJECT,
- },
- {
- "calls: conditional call",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "calls: conditional call 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .result = ACCEPT,
- },
- {
- "calls: conditional call 3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_JMP_IMM(BPF_JA, 0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, -6),
- BPF_MOV64_IMM(BPF_REG_0, 3),
- BPF_JMP_IMM(BPF_JA, 0, 0, -6),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn",
- .result = REJECT,
- },
- {
- "calls: conditional call 4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, -5),
- BPF_MOV64_IMM(BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .result = ACCEPT,
- },
- {
- "calls: conditional call 5",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, -6),
- BPF_MOV64_IMM(BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn",
- .result = REJECT,
- },
- {
- "calls: conditional call 6",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn",
- .result = REJECT,
- },
- {
- "calls: using r0 returned by callee",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .result = ACCEPT,
- },
- {
- "calls: using uninit r0 from callee",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "calls: callee is using r1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_ACT,
- .result = ACCEPT,
- .retval = TEST_DATA_LEN,
- },
- {
- "calls: callee using args1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "allowed for root only",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- },
- {
- "calls: callee using wrong args2",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "R2 !read_ok",
- .result = REJECT,
- },
- {
- "calls: callee using two args",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
- offsetof(struct __sk_buff, len)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
- offsetof(struct __sk_buff, len)),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "allowed for root only",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
- },
- {
- "calls: callee changing pkt pointers",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- /* clear_all_pkt_pointers() has to walk all frames
- * to make sure that pkt pointers in the caller
- * are cleared when the callee calls a helper that
- * adjusts the packet size
- */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_xdp_adjust_head),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R6 invalid mem access 'inv'",
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: two calls with args",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = TEST_DATA_LEN + TEST_DATA_LEN,
- },
- {
- "calls: calls with stack arith",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "calls: calls with misaligned stack access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- .errstr = "misaligned stack access",
- .result = REJECT,
- },
- {
- "calls: calls control flow, jump test",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 43),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 43,
- },
- {
- "calls: calls control flow, jump test 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 43),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "jump out of range from insn 1 to 4",
- .result = REJECT,
- },
- {
- "calls: two calls with bad jump",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range from insn 11 to 9",
- .result = REJECT,
- },
- {
- "calls: recursive call. test1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "calls: recursive call. test2",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "calls: unreachable code",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "unreachable insn 6",
- .result = REJECT,
- },
- {
- "calls: invalid call",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "invalid destination",
- .result = REJECT,
- },
- {
- "calls: invalid call 2",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "invalid destination",
- .result = REJECT,
- },
- {
- "calls: jumping across function bodies. test1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "calls: jumping across function bodies. test2",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "calls: call without exit",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "not an exit",
- .result = REJECT,
- },
- {
- "calls: call into middle of ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "last insn",
- .result = REJECT,
- },
- {
- "calls: call into middle of other call",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "last insn",
- .result = REJECT,
- },
- {
- "calls: ld_abs with changing ctx data in callee",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_vlan_push),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
- .result = REJECT,
- },
- {
- "calls: two calls with bad fallthrough",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "not an exit",
- .result = REJECT,
- },
- {
- "calls: two calls with stack read",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = ACCEPT,
- },
- {
- "calls: two calls with stack write",
- .insns = {
- /* main prog */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
- /* write into stack frame of main prog */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* read from stack frame of main prog */
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = ACCEPT,
- },
- {
- "calls: stack overflow using two frames (pre-call access)",
- .insns = {
- /* prog 1 */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* prog 2 */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "combined stack size",
- .result = REJECT,
- },
- {
- "calls: stack overflow using two frames (post-call access)",
- .insns = {
- /* prog 1 */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_EXIT_INSN(),
-
- /* prog 2 */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "combined stack size",
- .result = REJECT,
- },
- {
- "calls: stack depth check using three frames. test1",
- .insns = {
- /* main */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
- BPF_EXIT_INSN(),
- /* B */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- /* stack_main=32, stack_A=256, stack_B=64
- * and max(main+A, main+A+B) < 512
- */
- .result = ACCEPT,
- },
- {
- "calls: stack depth check using three frames. test2",
- .insns = {
- /* main */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
- BPF_EXIT_INSN(),
- /* B */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- /* stack_main=32, stack_A=64, stack_B=256
- * and max(main+A, main+A+B) < 512
- */
- .result = ACCEPT,
- },
- {
- "calls: stack depth check using three frames. test3",
- .insns = {
- /* main */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
- BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* A */
- BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -3),
- /* B */
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- /* stack_main=64, stack_A=224, stack_B=256
- * and max(main+A, main+A+B) > 512
- */
- .errstr = "combined stack",
- .result = REJECT,
- },
- {
- "calls: stack depth check using three frames. test4",
- /* void main(void) {
- * func1(0);
- * func1(1);
- * func2(1);
- * }
- * void func1(int alloc_or_recurse) {
- * if (alloc_or_recurse) {
- * frame_pointer[-300] = 1;
- * } else {
- * func2(alloc_or_recurse);
- * }
- * }
- * void func2(int alloc_or_recurse) {
- * if (alloc_or_recurse) {
- * frame_pointer[-300] = 1;
- * }
- * }
- */
- .insns = {
- /* main */
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* A */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
- BPF_EXIT_INSN(),
- /* B */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = REJECT,
- .errstr = "combined stack",
- },
- {
- "calls: stack depth check using three frames. test5",
- .insns = {
- /* main */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
- BPF_EXIT_INSN(),
- /* A */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
- BPF_EXIT_INSN(),
- /* B */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
- BPF_EXIT_INSN(),
- /* C */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
- BPF_EXIT_INSN(),
- /* D */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
- BPF_EXIT_INSN(),
- /* E */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
- BPF_EXIT_INSN(),
- /* F */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
- BPF_EXIT_INSN(),
- /* G */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
- BPF_EXIT_INSN(),
- /* H */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "call stack",
- .result = REJECT,
- },
- {
- "calls: spill into caller stack frame",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
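- /* subprog 1: spilling a stack pointer into the caller's frame is rejected */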
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "cannot spill",
- .result = REJECT,
- },
- {
- "calls: write into caller stack frame",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
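- /* subprog 1: a plain scalar store into the caller's stack slot is allowed */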
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "calls: write into callee stack frame",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
- BPF_EXIT_INSN(),
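- /* subprog 1: returning a pointer into its own stack frame is rejected */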
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "cannot return stack pointer",
- .result = REJECT,
- },
- {
- "calls: two calls with stack write and void return",
- .insns = {
- /* main prog */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* write into stack frame of main prog */
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
- BPF_EXIT_INSN(), /* void return */
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = ACCEPT,
- },
- {
- "calls: ambiguous return value",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
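- /* subprog: the r1 == 0 path exits without ever setting R0 */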
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "allowed for root only",
- .result_unpriv = REJECT,
- .errstr = "R0 !read_ok",
- .result = REJECT,
- },
- {
- "calls: two calls that return map_value",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
-
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- /* fetch second map_value_ptr from the stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- /* call 3rd function twice */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* first time with fp-8 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- /* second time with fp-16 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- /* lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr into stack frame of main prog */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(), /* return 0 */
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .fixup_map_hash_8b = { 23 },
- .result = ACCEPT,
- },
- {
- "calls: two calls that return map_value with bool condition",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- /* call 3rd function twice */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* first time with fp-8 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- /* second time with fp-16 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- /* fetch second map_value_ptr from the stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- /* lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(), /* return 0 */
- /* write map_value_ptr into stack frame of main prog */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(), /* return 1 */
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .fixup_map_hash_8b = { 23 },
- .result = ACCEPT,
- },
- {
- "calls: two calls that return map_value with incorrect bool check",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- /* call 3rd function twice */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* first time with fp-8 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- /* second time with fp-16 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- /* fetch second map_value_ptr from the stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- /* lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(), /* return 0 */
- /* write map_value_ptr into stack frame of main prog */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(), /* return 1 */
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .fixup_map_hash_8b = { 23 },
- .result = REJECT,
- .errstr = "invalid read from stack off -16+0 size 8",
- },
- {
- "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_9, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 1 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=8 off=2 size=8",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_9, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 1 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = ACCEPT,
- },
- {
- "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
- BPF_JMP_IMM(BPF_JA, 0, 0, -30),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 1 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -8),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=8 off=2 size=8",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: two calls that receive map_value_ptr_or_null via arg. test1",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_9, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 1 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = ACCEPT,
- },
- {
- "calls: two calls that receive map_value_ptr_or_null via arg. test2",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_9, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 0 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
- },
- {
- "calls: pkt_ptr spill into caller stack",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- /* now the pkt range is verified, read pkt_ptr from stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = POINTER_VALUE,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- /* The spill marking is still kept, but it is not safe in all cases. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- /* now the pkt range is verified, read pkt_ptr from stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "invalid access to packet",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- /* Marking is still kept and safe here. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* now the pkt range is verified, read pkt_ptr from stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- /* Check marking propagated. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- /* spill checked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "same insn cannot be used with different",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- /* spill checked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "R4 invalid mem access",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 7",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- /* spill checked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "R4 invalid mem access",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 8",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- /* spill checked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 9",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "invalid access to packet",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: caller stack init to zero or map_value_or_null",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- /* fetch map_value_or_null or const_zero from stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- /* store into map_value */
- BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- /* if (ctx == 0) return; */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
- /* else bpf_map_lookup() and *(fp - 8) = r0 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 13 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "calls: stack init to zero and pruning",
- .insns = {
- /* first make allocated_stack 16 byte */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
- /* now fork the execution such that the false branch
- * of JGT insn will be verified second and it skips the zero
- * init of fp-8 stack slot. If stack liveness marking
- * is missing live_read marks from call map_lookup
- * processing then pruning will incorrectly assume
- * that fp-8 stack slot was unused in the fall-through
- * branch and will accept the program incorrectly
- */
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 6 },
- .errstr = "invalid indirect read from stack off -8+0 size 8",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "calls: two calls returning different map pointers for lookup (hash, array)",
- .insns = {
- /* main prog */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
- BPF_CALL_REL(11),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_CALL_REL(12),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- /* subprog 1 */
- BPF_LD_MAP_FD(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* subprog 2 */
- BPF_LD_MAP_FD(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_48b = { 13 },
- .fixup_map_array_48b = { 16 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "calls: two calls returning different map pointers for lookup (hash, map in map)",
- .insns = {
- /* main prog */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
- BPF_CALL_REL(11),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_CALL_REL(12),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- /* subprog 1 */
- BPF_LD_MAP_FD(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* subprog 2 */
- BPF_LD_MAP_FD(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_in_map = { 16 },
- .fixup_map_array_48b = { 13 },
- .result = REJECT,
- .errstr = "R0 invalid mem access 'map_ptr'",
- },
- {
- "cond: two branches returning different map pointers for lookup (tail, tail)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 5 },
- .fixup_prog2 = { 2 },
- .result_unpriv = REJECT,
- .errstr_unpriv = "tail_call abusing map_ptr",
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "cond: two branches returning same map pointers for lookup (tail, tail)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog2 = { 2, 5 },
- .result_unpriv = ACCEPT,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "search pruning: all branches should be verified (nop operation)",
- .insns = {
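- /* fp-16 ends up 0 or 1; the nonzero path stores through r6 = 0,
- * so both branches must be verified
- */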
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_A(1),
- BPF_MOV64_IMM(BPF_REG_4, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
- BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
- BPF_MOV64_IMM(BPF_REG_6, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R6 invalid mem access 'inv'",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "search pruning: all branches should be verified (invalid stack access)",
- .insns = {
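- /* one branch initializes fp-16, the other fp-24; the later read
- * of fp-16 is only valid in the first
- */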
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
- BPF_JMP_A(1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
- BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "invalid read from stack off -16+0 size 8",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "jit: lsh, rsh, arsh by 1",
- .insns = {
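- /* 0xff << 1 << 1 = 0x3fc, >> 1 >> 1 = 0xff, arsh by 1 = 0x7f */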
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_IMM(BPF_REG_1, 0xff),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
- BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "jit: mov32 for ldimm64, 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
- BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "jit: mov32 for ldimm64, 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
- BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "jit: various mul tests",
- .insns = {
- BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
- BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
- BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
- BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
- BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
- BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
- BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
- BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
- BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
- BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
- BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "xadd/w check unaligned stack",
- .insns = {
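- /* word-sized XADD at fp-7 is not 4-byte aligned */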
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "misaligned stack access off",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "xadd/w check unaligned map",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_1, 1),
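- /* word-sized XADD at map value offset 3 is misaligned */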
- BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = REJECT,
- .errstr = "misaligned value access off",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "xadd/w check unaligned pkt",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 99),
- BPF_JMP_IMM(BPF_JA, 0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
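- /* XADD into packet memory is not allowed at all */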
- BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
- BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "BPF_XADD stores into R2 pkt is not allowed",
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "xadd/w check whether src/dst got mangled, 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
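- /* fp-8 starts at 1; two XADDs of 1 make it 3, with r0 and r10 untouched */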
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
- BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 3,
- },
- {
- "xadd/w check whether src/dst got mangled, 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
- BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
- BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 3,
- },
- {
- "bpf_get_stack return R0 within range",
- .insns = {
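- /* the first get_stack's sign-extended return bounds the second call's buffer */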
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
- BPF_MOV64_IMM(BPF_REG_4, 256),
- BPF_EMIT_CALL(BPF_FUNC_get_stack),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
- BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_EMIT_CALL(BPF_FUNC_get_stack),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 4 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "ld_abs: invalid op 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_DW, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "unknown opcode",
- },
- {
- "ld_abs: invalid op 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 256),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "unknown opcode",
- },
- {
- "ld_abs: nmap reduced",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_H, 12),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
- BPF_LD_ABS(BPF_H, 12),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
- BPF_MOV32_IMM(BPF_REG_0, 18),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
- BPF_LD_IND(BPF_W, BPF_REG_7, 14),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
- BPF_MOV32_IMM(BPF_REG_0, 280971478),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
- BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
- BPF_LD_ABS(BPF_H, 12),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
- BPF_MOV32_IMM(BPF_REG_0, 22),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
- BPF_LD_IND(BPF_H, BPF_REG_7, 14),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
- BPF_MOV32_IMM(BPF_REG_0, 17366),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
- BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV32_IMM(BPF_REG_0, 256),
- BPF_EXIT_INSN(),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .data = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 256,
- },
- {
- "ld_abs: div + abs, test 1",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 3),
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
- BPF_LD_ABS(BPF_B, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
- BPF_LD_IND(BPF_B, BPF_REG_8, -70),
- BPF_EXIT_INSN(),
- },
- .data = {
- 10, 20, 30, 40, 50,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 10,
- },
- {
- "ld_abs: div + abs, test 2",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 3),
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
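- /* offset 128 is past the 5-byte packet: ld_abs aborts with return 0 */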
- BPF_LD_ABS(BPF_B, 128),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
- BPF_LD_IND(BPF_B, BPF_REG_8, -70),
- BPF_EXIT_INSN(),
- },
- .data = {
- 10, 20, 30, 40, 50,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "ld_abs: div + abs, test 3",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
- BPF_LD_ABS(BPF_B, 3),
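- /* runtime divide by zero leaves 0 in the destination */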
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .data = {
- 10, 20, 30, 40, 50,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "ld_abs: div + abs, test 4",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
- BPF_LD_ABS(BPF_B, 256),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .data = {
- 10, 20, 30, 40, 50,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "ld_abs: vlan + abs, test 1",
- .insns = { },
- .data = {
- 0x34,
- },
- .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0xbef,
- },
- {
- "ld_abs: vlan + abs, test 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_6, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_vlan_push),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .data = {
- 0x34,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "ld_abs: jump around ld_abs",
- .insns = { },
- .data = {
- 10, 11,
- },
- .fill_helper = bpf_fill_jump_around_ld_abs,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 10,
- },
- {
- "ld_dw: xor semi-random 64 bit imms, test 1",
- .insns = { },
- .data = { },
- .fill_helper = bpf_fill_rand_ld_dw,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 4090,
- },
- {
- "ld_dw: xor semi-random 64 bit imms, test 2",
- .insns = { },
- .data = { },
- .fill_helper = bpf_fill_rand_ld_dw,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 2047,
- },
- {
- "ld_dw: xor semi-random 64 bit imms, test 3",
- .insns = { },
- .data = { },
- .fill_helper = bpf_fill_rand_ld_dw,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 511,
- },
- {
- "ld_dw: xor semi-random 64 bit imms, test 4",
- .insns = { },
- .data = { },
- .fill_helper = bpf_fill_rand_ld_dw,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 5,
- },
- {
- "pass unmodified ctx pointer to helper",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_update),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: leak potential reference",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: leak potential reference on stack",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: leak potential reference on stack 2",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: zero potential reference",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: copy and zero potential references",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: release reference without check",
- .insns = {
- BPF_SK_LOOKUP,
- /* reference in r0 may be NULL */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "type=sock_or_null expected=sock",
- .result = REJECT,
- },
- {
- "reference tracking: release reference",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: release reference 2",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: release reference twice",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "type=inv expected=sock",
- .result = REJECT,
- },
- {
- "reference tracking: release reference twice inside branch",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "type=inv expected=sock",
- .result = REJECT,
- },
- {
- "reference tracking: alloc, check, free in one subbranch",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
- /* if (offsetof(skb, mark) > data_len) exit; */
- BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
- offsetof(struct __sk_buff, mark)),
- BPF_SK_LOOKUP,
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
- /* Leak reference in R0 */
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "reference tracking: alloc, check, free in both subbranches",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
- /* if (offsetof(skb, mark) > data_len) exit; */
- BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
- offsetof(struct __sk_buff, mark)),
- BPF_SK_LOOKUP,
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "reference tracking in call: free reference in subprog",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "pass modified ctx pointer to helper, 1",
- .insns = {
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_update),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "dereference of modified ctx ptr",
- },
- {
- "pass modified ctx pointer to helper, 2",
- .insns = {
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_socket_cookie),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result_unpriv = REJECT,
- .result = REJECT,
- .errstr_unpriv = "dereference of modified ctx ptr",
- .errstr = "dereference of modified ctx ptr",
- },
- {
- "pass modified ctx pointer to helper, 3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_update),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "variable ctx access var_off=(0x0; 0x4)",
- },
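All three rejections above stem from the same rule: a helper taking the context must receive r1 exactly as the program got it, so any arithmetic on the pointer, constant or variable, poisons it. A minimal restricted-C sketch of the mistake (the -612 offset mirrors the tests and is otherwise meaningless):

	/* rejected: "dereference of modified ctx ptr" */
	void *bad_ctx = (char *)skb - 612;

	bpf_csum_update(bad_ctx, 0);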
- {
- "mov64 src == dst",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
- // Check bounds are OK
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "mov64 src != dst",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
- // Check bounds are OK
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "allocated_stack",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8),
- BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9),
- BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = ACCEPT,
- .insn_processed = 15,
- },
- {
- "masking, test out of bounds 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 5),
- BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 3",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 4",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 5",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 6",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 7",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 5),
- BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 8",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 9",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 10",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 11",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 12",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test in bounds 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 4),
- BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 4,
- },
- {
- "masking, test in bounds 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test in bounds 3",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0xfffffffe),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0xfffffffe,
- },
- {
- "masking, test in bounds 4",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0xabcde),
- BPF_MOV32_IMM(BPF_REG_2, 0xabcdef - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0xabcde,
- },
- {
- "masking, test in bounds 5",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test in bounds 6",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 46),
- BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 46,
- },
- {
- "masking, test in bounds 7",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, -46),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
- BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 46,
- },
- {
- "masking, test in bounds 8",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, -47),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
- BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
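Every masking test above runs the same six-instruction branchless clamp: subtract the index from bound - 1, OR the index back in, negate, arithmetic-shift by 63 to smear the sign bit into a mask, and AND. A C model of what the tests compute (a sketch; values are 64-bit, as in the BPF registers):

	#include <stdint.h>

	/* Returns idx when idx < bound, 0 otherwise (idx == 0 maps to itself). */
	static uint64_t mask_index(uint64_t idx, uint64_t bound)
	{
		uint64_t m = (bound - 1) - idx;	/* MSB set iff idx > bound - 1 */

		m |= idx;			/* MSB also set iff idx itself is huge */
		m = -m;
		m = (uint64_t)((int64_t)m >> 63); /* all-ones or all-zeros */
		return idx & m;
	}

This is why the out-of-bounds variants all expect retval 0 while the in-bounds variants return the index unchanged.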
- {
- "reference tracking in call: free reference in subprog and outside",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "type=inv expected=sock",
- .result = REJECT,
- },
- {
- "reference tracking in call: alloc & leak reference in subprog",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
- BPF_SK_LOOKUP,
- /* spill unchecked sk_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking in call: alloc in subprog, release outside",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_SK_LOOKUP,
- BPF_EXIT_INSN(), /* return sk */
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = POINTER_VALUE,
- .result = ACCEPT,
- },
- {
- "reference tracking in call: sk_ptr leak into caller stack",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
- /* spill unchecked sk_ptr into stack of caller */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_SK_LOOKUP,
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking in call: sk_ptr spill into caller stack",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
- /* spill unchecked sk_ptr into stack of caller */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- /* now the sk_ptr is verified, free the reference */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_SK_LOOKUP,
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: allow LD_ABS",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: forbid LD_ABS while holding reference",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_SK_LOOKUP,
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
- .result = REJECT,
- },
- {
- "reference tracking: allow LD_IND",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_IMM(BPF_REG_7, 1),
- BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "reference tracking: forbid LD_IND while holding reference",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_7, 1),
- BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
- .result = REJECT,
- },
- {
- "reference tracking: check reference or tail call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
- BPF_SK_LOOKUP,
- /* if (sk) bpf_sk_release() */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
- /* bpf_tail_call() */
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 17 },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: release reference then tail call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
- BPF_SK_LOOKUP,
- /* if (sk) bpf_sk_release() */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- /* bpf_tail_call() */
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 18 },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: leak possible reference over tail call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
- /* Look up socket and store in REG_6 */
- BPF_SK_LOOKUP,
- /* bpf_tail_call() */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- /* if (sk) bpf_sk_release() */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 16 },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "tail_call would lead to reference leak",
- .result = REJECT,
- },
- {
- "reference tracking: leak checked reference over tail call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
- /* Look up socket and store in REG_6 */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- /* if (!sk) goto end */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- /* bpf_tail_call() */
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 17 },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "tail_call would lead to reference leak",
- .result = REJECT,
- },
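Both tail-call rejections follow from the tail-call contract: a successful bpf_tail_call() never returns, so a reference still held at the call site can never reach its bpf_sk_release(). A sketch of the rejected shape (jmp_table stands in for the fixup_prog1 map):

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
	bpf_tail_call(skb, &jmp_table, 0);	/* on success, sk leaks */
	if (sk)
		bpf_sk_release(sk);		/* must happen before the tail call */

The two accepted tests earlier release the reference (or prove it NULL) before jumping.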
- {
- "reference tracking: mangle and release sock_or_null",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "R1 pointer arithmetic on sock_or_null prohibited",
- .result = REJECT,
- },
- {
- "reference tracking: mangle and release sock",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "R1 pointer arithmetic on sock prohibited",
- .result = REJECT,
- },
- {
- "reference tracking: access member",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: write to member",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LD_IMM64(BPF_REG_2, 42),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
- offsetof(struct bpf_sock, mark)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "cannot write into socket",
- .result = REJECT,
- },
- {
- "reference tracking: invalid 64-bit access of member",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "invalid bpf_sock access off=0 size=8",
- .result = REJECT,
- },
- {
- "reference tracking: access after release",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "reference tracking: direct access for lookup",
- .insns = {
- /* Check that the packet is at least 64B long */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
- /* sk = sk_lookup_tcp(ctx, skb->data, ...) */
- BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "calls: ctx read at start of subprog",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "check wire_len is not readable by sockets",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, wire_len)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check wire_len is readable by tc classifier",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, wire_len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "check wire_len is not writable by tc classifier",
- .insns = {
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, wire_len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "invalid bpf_context access",
- .errstr_unpriv = "R1 leaks addr",
- .result = REJECT,
- },
- {
- "calls: cross frame pruning",
- .insns = {
- /* r8 = !!random();
- * call pruner()
- * if (r8)
- * do something bad;
- */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_8, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "jset: functional",
- .insns = {
- /* r0 = 0 */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- /* prep for direct packet access via r2 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
-
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
-
- /* reg, bit 63 or bit 0 set, taken */
- BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
- BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
- BPF_EXIT_INSN(),
-
- /* reg, bit 62, not taken */
- BPF_LD_IMM64(BPF_REG_8, 0x4000000000000000),
- BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_EXIT_INSN(),
-
- /* imm, any bit set, taken */
- BPF_JMP_IMM(BPF_JSET, BPF_REG_7, -1, 1),
- BPF_EXIT_INSN(),
-
- /* imm, bit 31 set, taken */
- BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
- BPF_EXIT_INSN(),
-
- /* all good - return r0 == 2 */
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .runs = 7,
- .retvals = {
- { .retval = 2,
- .data64 = { (1ULL << 63) | (1U << 31) | (1U << 0), }
- },
- { .retval = 2,
- .data64 = { (1ULL << 63) | (1U << 31), }
- },
- { .retval = 2,
- .data64 = { (1ULL << 31) | (1U << 0), }
- },
- { .retval = 2,
- .data64 = { (__u32)-1, }
- },
- { .retval = 2,
- .data64 = { ~0x4000000000000000ULL, }
- },
- { .retval = 0,
- .data64 = { 0, }
- },
- { .retval = 0,
- .data64 = { ~0ULL, }
- },
- },
- },
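BPF_JSET is a bit-test jump: the branch is taken iff (dst & src) != 0. In the BPF_K form the 32-bit immediate is sign-extended before the 64-bit AND, which the sign-extend test below depends on. A one-line C model (dst and imm as in the instruction):

	int taken = (dst & (uint64_t)(int64_t)(int32_t)imm) != 0;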
- {
- "jset: sign-extend",
- .insns = {
- /* r0 = 0 */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- /* prep for direct packet access via r2 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
-
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
-
- BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
- BPF_EXIT_INSN(),
+/* BPF_DIRECT_PKT_R2 contains 7 instructions: it initializes the default
+ * return value to 0 and does the necessary preparation for direct packet
+ * access through r2. The allowed access range is 8 bytes.
+ */
+#define BPF_DIRECT_PKT_R2 \
+ BPF_MOV64_IMM(BPF_REG_0, 0), \
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
+ offsetof(struct __sk_buff, data)), \
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
+ offsetof(struct __sk_buff, data_end)), \
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), \
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), \
+ BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1), \
+ BPF_EXIT_INSN()
+
+/* BPF_RAND_UEXT_R7 contains 4 instructions: it initializes R7 to a random
+ * positive u32 and zero-extends it to 64 bits.
+ */
+#define BPF_RAND_UEXT_R7 \
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
+ BPF_FUNC_get_prandom_u32), \
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33), \
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)
+
+/* BPF_RAND_SEXT_R7 contains 5 instructions: it initializes R7 to a random
+ * negative u32 and sign-extends it to 64 bits.
+ */
+#define BPF_RAND_SEXT_R7 \
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
+ BPF_FUNC_get_prandom_u32), \
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000), \
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32), \
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
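With these macros a rewritten test can drop the hand-rolled prologue. A hedged usage sketch (not one of the converted tests verbatim); the 8-byte load is safe because BPF_DIRECT_PKT_R2 has already range-checked the packet:

	.insns = {
	BPF_DIRECT_PKT_R2,
	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
	BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 1, 1),
	BPF_EXIT_INSN(),		/* bit 0 clear: return 0 */
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),		/* bit 0 set: return 2 */
	},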
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 2,
- .data = { 1, 0, 0, 0, 0, 0, 0, 1, },
- },
- {
- "jset: known const compare",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .retval_unpriv = 1,
- .result_unpriv = ACCEPT,
- .retval = 1,
- .result = ACCEPT,
- },
- {
- "jset: known const compare bad",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "!read_ok",
- .result_unpriv = REJECT,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "jset: unknown const compare taken",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "!read_ok",
- .result_unpriv = REJECT,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "jset: unknown const compare not taken",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "!read_ok",
- .result_unpriv = REJECT,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "jset: half-known const compare",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .result_unpriv = ACCEPT,
- .result = ACCEPT,
- },
- {
- "jset: range",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0xf0, 3),
- BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .result_unpriv = ACCEPT,
- .result = ACCEPT,
- },
+static struct bpf_test tests[] = {
+#define FILL_ARRAY
+#include <verifier/tests.h>
+#undef FILL_ARRAY
};
static int probe_filter_length(const struct bpf_insn *fp)
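The FILL_ARRAY trick pulls every per-file test back into tests[] through a generated header; tests.h is presumably produced by the selftests Makefile as one #include line per verifier/*.c source. A sketch of its expected shape:

	/* Generated header; do not edit. */
	#ifdef FILL_ARRAY
	#include "and.c"
	#include "array_access.c"
	#include "basic.c"
	/* ... one line per tools/testing/selftests/bpf/verifier/*.c ... */
	#endif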
diff --git a/tools/testing/selftests/bpf/verifier/.gitignore b/tools/testing/selftests/bpf/verifier/.gitignore
new file mode 100644
index 000000000000..45984a364647
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/.gitignore
@@ -0,0 +1 @@
+tests.h
diff --git a/tools/testing/selftests/bpf/verifier/and.c b/tools/testing/selftests/bpf/verifier/and.c
new file mode 100644
index 000000000000..e0fad1548737
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/and.c
@@ -0,0 +1,50 @@
+{
+ "invalid and of negative number",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 max value is outside of the array range",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid range check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+ BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
+ BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
+ BPF_MOV32_IMM(BPF_REG_3, 1),
+ BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
+ BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+ BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
+ BPF_MOV64_REG(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 max value is outside of the array range",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c
new file mode 100644
index 000000000000..0dcecaf3ec6f
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/array_access.c
@@ -0,0 +1,219 @@
+{
+ "valid map access into an array with a constant",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "valid map access into an array with a register",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "valid map access into an array with a variable",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
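The accepted variable-index test is the canonical bounds-checked map access; its restricted-C shape, assuming the harness's 48-byte value layout and MAX_ENTRIES, is roughly:

	unsigned int idx, *val;

	val = bpf_map_lookup_elem(&map, &key);
	if (!val)
		return 0;
	idx = *val;
	if (idx >= MAX_ENTRIES)		/* one unsigned check bounds both ends */
		return 0;
	*(unsigned long *)((char *)val + (idx << 2)) = 42;

The later "invalid map access into an array with a variable" test is the same program minus the JGE check, hence the unbounded-access rejection.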
+{
+ "valid map access into an array with a signed variable",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with a constant",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
+ offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=48 size=8",
+ .result = REJECT,
+},
+{
+ "invalid map access into an array with a register",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 min value is outside of the array range",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with a variable",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with no floor check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .errstr = "R0 unbounded memory access",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with a invalid max check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .errstr = "invalid access to map value, value_size=48 off=44 size=8",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with a invalid max check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3, 11 },
+ .errstr = "R0 pointer += pointer",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic.c b/tools/testing/selftests/bpf/verifier/basic.c
new file mode 100644
index 000000000000..b8d18642653a
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic.c
@@ -0,0 +1,23 @@
+{
+ "empty prog",
+ .insns = {
+ },
+ .errstr = "unknown opcode 00",
+ .result = REJECT,
+},
+{
+ "only exit insn",
+ .insns = {
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
+},
+{
+ "no bpf_exit",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
+ },
+ .errstr = "not an exit",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic_call.c b/tools/testing/selftests/bpf/verifier/basic_call.c
new file mode 100644
index 000000000000..a8c6ab4c1622
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic_call.c
@@ -0,0 +1,50 @@
+{
+ "invalid call insn1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode 8d",
+ .result = REJECT,
+},
+{
+ "invalid call insn2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_CALL uses reserved",
+ .result = REJECT,
+},
+{
+ "invalid function call",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid func unknown#1234567",
+ .result = REJECT,
+},
+{
+ "invalid argument register",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "non-invalid argument register",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic_instr.c b/tools/testing/selftests/bpf/verifier/basic_instr.c
new file mode 100644
index 000000000000..ed91a7b9a456
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic_instr.c
@@ -0,0 +1,134 @@
+{
+ "add+sub+mul",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_2, 3),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = -3,
+},
+{
+ "xor32 zero extend check",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_2, -1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
+ BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
+ BPF_MOV32_IMM(BPF_REG_0, 2),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "arsh32 on imm",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "arsh32 on imm 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788),
+ BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = -16069393,
+},
+{
+ "arsh32 on reg",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "arsh32 on reg 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0xffff55667788),
+ BPF_MOV64_IMM(BPF_REG_1, 15),
+ BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 43724,
+},
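The arsh32 retvals fall out of the subregister rule: a 32-bit ARSH shifts the low word arithmetically and the destination is then zero-extended. For "arsh32 on reg 2" the arithmetic is (a sketch in C):

	uint64_t r0 = 0xffff55667788ULL;
	int32_t lo = (int32_t)r0;	/* 0x55667788, sign bit clear */

	r0 = (uint32_t)(lo >> 15);	/* 0xaacc == 43724 */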
+{
+ "arsh64 on imm",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "arsh64 on reg",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "invalid 64-bit BPF_END",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ {
+ .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
+ .dst_reg = BPF_REG_0,
+ .src_reg = 0,
+ .off = 0,
+ .imm = 32,
+ },
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode d7",
+ .result = REJECT,
+},
+{
+ "mov64 src == dst",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
+ /* Check bounds are OK */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "mov64 src != dst",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
+ /* Check bounds are OK */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic_stack.c b/tools/testing/selftests/bpf/verifier/basic_stack.c
new file mode 100644
index 000000000000..b56f8117c09d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic_stack.c
@@ -0,0 +1,64 @@
+{
+ "stack out of bounds",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack",
+ .result = REJECT,
+},
+{
+ "uninitialized stack1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 2 },
+ .errstr = "invalid indirect read from stack",
+ .result = REJECT,
+},
+{
+ "uninitialized stack2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid read from stack",
+ .result = REJECT,
+},
+{
+ "invalid fp arithmetic",
+ /* If this ever gets changed, make sure JITs can deal with it. */
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 subtraction from stack pointer",
+ .result = REJECT,
+},
+{
+ "non-invalid fp arithmetic",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "misaligned read from stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned stack access",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic_stx_ldx.c b/tools/testing/selftests/bpf/verifier/basic_stx_ldx.c
new file mode 100644
index 000000000000..7a0aab3f2cd2
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic_stx_ldx.c
@@ -0,0 +1,45 @@
+{
+ "invalid src register in STX",
+ .insns = {
+ BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R15 is invalid",
+ .result = REJECT,
+},
+{
+ "invalid dst register in STX",
+ .insns = {
+ BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R14 is invalid",
+ .result = REJECT,
+},
+{
+ "invalid dst register in ST",
+ .insns = {
+ BPF_ST_MEM(BPF_B, 14, -1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R14 is invalid",
+ .result = REJECT,
+},
+{
+ "invalid src register in LDX",
+ .insns = {
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R12 is invalid",
+ .result = REJECT,
+},
+{
+ "invalid dst register in LDX",
+ .insns = {
+ BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R11 is invalid",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c
new file mode 100644
index 000000000000..d55f476f2237
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/bounds.c
@@ -0,0 +1,508 @@
+{
+ "subtraction bounds (map value) variant 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
+ BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 max value is outside of the array range",
+ .result = REJECT,
+},
+{
+ "subtraction bounds (map value) variant 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
+ BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "check subtraction on pointers for unpriv",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1, 9 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R9 pointer -= pointer prohibited",
+},
+{
+ "bounds check based on zero-extended MOV",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ /* r2 = 0x0000'0000'ffff'ffff */
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
+ /* r2 = 0 */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+ /* no-op */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ /* access at offset 0 */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT
+},
+{
+ "bounds check based on sign-extended MOV. test1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ /* r2 = 0xffff'ffff'ffff'ffff */
+ BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+ /* r2 = 0xffff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+ /* r0 = <oob pointer> */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ /* access to OOB pointer */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "map_value pointer and 4294967295",
+ .result = REJECT
+},
+{
+ "bounds check based on sign-extended MOV. test2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ /* r2 = 0xffff'ffff'ffff'ffff */
+ BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+ /* r2 = 0xfff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
+ /* r0 = <oob pointer> */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ /* access to OOB pointer */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 min value is outside of the array range",
+ .result = REJECT
+},
+{
+ "bounds check based on reg_off + var_off + insn_off. test1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
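+	/* r6 &= 1, then r6 and r0 each gain (1 << 29) - 1, so the
+	 * load at insn offset 3 starts at byte 2^30 + 1, far outside
+	 * the 8-byte map value
+	 */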
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "value_size=8 off=1073741825",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "bounds check based on reg_off + var_off + insn_off. test2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "value 1073741823",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "bounds check after truncation of non-boundary-crossing range",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ /* r2 = 0x10'0000'0000 */
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
+ /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ /* r1 = [0x00, 0xff] */
+ BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
+ /* r1 = 0 */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* no-op */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* access at offset 0 */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT
+},
+{
+ "bounds check after truncation of boundary-crossing range (1)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0xffff'ffff] or
+ * [0x0000'0000, 0x0000'007f]
+ */
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0x00, 0xff] or
+ * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = 0 or
+ * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* no-op or OOB pointer computation */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ /* not actually fully unbounded, but the bound is very high */
+ .errstr = "R0 unbounded memory access",
+ .result = REJECT
+},
+{
+ "bounds check after truncation of boundary-crossing range (2)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0xffff'ffff] or
+ * [0x0000'0000, 0x0000'007f]
+ * difference to previous test: truncation via MOV32
+ * instead of ALU32.
+ */
+ BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0x00, 0xff] or
+ * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = 0 or
+ * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* no-op or OOB pointer computation */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ /* not actually fully unbounded, but the bound is very high */
+ .errstr = "R0 unbounded memory access",
+ .result = REJECT
+},
+{
+ "bounds check after wrapping 32-bit addition",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ /* r1 = 0x7fff'ffff */
+ BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
+ /* r1 = 0xffff'fffe */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ /* r1 = 0 */
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
+ /* no-op */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* access at offset 0 */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT
+},
+{
+ "bounds check after shift with oversized count operand",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_2, 32),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ /* r1 = (u32)1 << (u32)32 = ? */
+ BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
+ /* r1 = [0x0000, 0xffff] */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
+ /* computes unknown pointer, potentially OOB */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 max value is outside of the array range",
+ .result = REJECT
+},
+{
+ "bounds check after right shift of maybe-negative number",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ /* r1 = [-0x01, 0xfe] */
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
+ /* r1 = 0 or 0xff'ffff'ffff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* r1 = 0 or 0xffff'ffff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* computes unknown pointer, potentially OOB */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 unbounded memory access",
+ .result = REJECT
+},
+{
+ "bounds check after 32-bit right shift with 64-bit input",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ /* r1 = 2 */
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ /* r1 = 1<<32 */
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31),
+ /* r1 = 0 (NOT 2!) */
+ BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31),
+ /* r1 = 0xffff'fffe (NOT 0!) */
+ BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2),
+ /* computes OOB pointer */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 invalid mem access",
+ .result = REJECT,
+},
+{
+ "bounds check map access with off+size signed 32bit overflow. test1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "map_value pointer and 2147483646",
+ .result = REJECT
+},
+{
+ "bounds check map access with off+size signed 32bit overflow. test2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
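+	/* the second add already pushes the accumulated offset to
+	 * 1073741822, past the verifier's pointer offset limit
+	 */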
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "pointer offset 1073741822",
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .result = REJECT
+},
+{
+ "bounds check map access with off+size signed 32bit overflow. test3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "pointer offset -1073741822",
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .result = REJECT
+},
+{
+ "bounds check map access with off+size signed 32bit overflow. test4",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_1, 1000000),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "map_value pointer and 1000000000000",
+ .result = REJECT
+},
diff --git a/tools/testing/selftests/bpf/verifier/bounds_deduction.c b/tools/testing/selftests/bpf/verifier/bounds_deduction.c
new file mode 100644
index 000000000000..1fd07a4f27ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/bounds_deduction.c
@@ -0,0 +1,124 @@
+{
+ "check deducing bounds from const, 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
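+	/* subtracting the ctx pointer in r1 from a scalar is rejected */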
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "check deducing bounds from const, 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check deducing bounds from const, 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, ~0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "dereference of modified ctx ptr",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "check deducing bounds from const, 8",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, ~0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "dereference of modified ctx ptr",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "check deducing bounds from const, 9",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 10",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
+ /* Marks reg as unknown. */
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
+},
diff --git a/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c b/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c
new file mode 100644
index 000000000000..9baca7a75c42
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c
@@ -0,0 +1,406 @@
+{
+ "bounds checks mixing signed and unsigned, positive bounds",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 2),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
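+	/* reached with r1 > 2 unsigned and r1 <= 4 signed: the
+	 * unsigned check does not exclude huge values that are
+	 * negative when interpreted signed, so smin stays unbounded
+	 */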
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+ BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+ BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 4",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 5",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_6, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R4 min value is negative, either use unsigned",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 7",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 8",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 9",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 10",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 11",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ /* Dead branch. */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 12",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -6),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 13",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 2),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_7, 1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R7 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 14",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_MOV64_IMM(BPF_REG_8, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -7),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 15",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -6),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
new file mode 100644
index 000000000000..f24d50f09dbe
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
@@ -0,0 +1,44 @@
+{
+ "bpf_get_stack return R0 within range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
+ BPF_MOV64_IMM(BPF_REG_4, 256),
+ BPF_EMIT_CALL(BPF_FUNC_get_stack),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
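+	/* the LSH/ARSH pair sign-extends the helper's 32-bit
+	 * return value in r8
+	 */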
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
+ BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_EMIT_CALL(BPF_FUNC_get_stack),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 4 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
new file mode 100644
index 000000000000..4004891afa9c
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -0,0 +1,1942 @@
+{
+ "calls: basic sanity",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+},
+{
+	"calls: not on unprivileged",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "calls: div by 0 in subprog",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(BPF_REG_2, 0),
+ BPF_MOV32_IMM(BPF_REG_3, 1),
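+	/* eBPF defines division by zero to yield 0 at runtime,
+	 * so this subprog is accepted
+	 */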
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "calls: multiple ret types in subprog 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+},
+{
+ "calls: multiple ret types in subprog 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
+ offsetof(struct __sk_buff, data)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 16 },
+ .result = REJECT,
+ .errstr = "R0 min value is outside of the array range",
+},
+{
+ "calls: overlapping caller/callee",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "last insn is not an exit or jmp",
+ .result = REJECT,
+},
+{
+ "calls: wrong recursive calls",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "calls: wrong src reg",
+ .insns = {
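+	/* src_reg == 2 is reserved; only 0 or BPF_PSEUDO_CALL are valid */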
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "BPF_CALL uses reserved fields",
+ .result = REJECT,
+},
+{
+ "calls: wrong off value",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "BPF_CALL uses reserved fields",
+ .result = REJECT,
+},
+{
+ "calls: jump back loop",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn 0 to 0",
+ .result = REJECT,
+},
+{
+ "calls: conditional call",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "calls: conditional call 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+},
+{
+ "calls: conditional call 3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn",
+ .result = REJECT,
+},
+{
+ "calls: conditional call 4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+},
+{
+ "calls: conditional call 5",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn",
+ .result = REJECT,
+},
+{
+ "calls: conditional call 6",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn",
+ .result = REJECT,
+},
+{
+ "calls: using r0 returned by callee",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+},
+{
+ "calls: using uninit r0 from callee",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "calls: callee is using r1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_ACT,
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN,
+},
+{
+ "calls: callee using args1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+},
+{
+ "calls: callee using wrong args2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "R2 !read_ok",
+ .result = REJECT,
+},
+{
+ "calls: callee using two args",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, len)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
+ offsetof(struct __sk_buff, len)),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
+},
+{
+ "calls: callee changing pkt pointers",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+	/* clear_all_pkt_pointers() has to walk all frames
+	 * to make sure that pkt pointers in the caller
+	 * are cleared when the callee calls a helper that
+	 * adjusts the packet size
+	 */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R6 invalid mem access 'inv'",
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: two calls with args",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN + TEST_DATA_LEN,
+},
+{
+ "calls: calls with stack arith",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "calls: calls with misaligned stack access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+ .errstr = "misaligned stack access",
+ .result = REJECT,
+},
+{
+ "calls: calls control flow, jump test",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 43),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 43,
+},
+{
+ "calls: calls control flow, jump test 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 43),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "jump out of range from insn 1 to 4",
+ .result = REJECT,
+},
+{
+ "calls: two calls with bad jump",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range from insn 11 to 9",
+ .result = REJECT,
+},
+{
+ "calls: recursive call. test1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge",
+ .result = REJECT,
+},
+{
+ "calls: recursive call. test2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge",
+ .result = REJECT,
+},
+{
+ "calls: unreachable code",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "unreachable insn 6",
+ .result = REJECT,
+},
+{
+ "calls: invalid call",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "invalid destination",
+ .result = REJECT,
+},
+{
+ "calls: invalid call 2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "invalid destination",
+ .result = REJECT,
+},
+{
+ "calls: jumping across function bodies. test1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "calls: jumping across function bodies. test2",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "calls: call without exit",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "not an exit",
+ .result = REJECT,
+},
+{
+ "calls: call into middle of ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "last insn",
+ .result = REJECT,
+},
+{
+ "calls: call into middle of other call",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "last insn",
+ .result = REJECT,
+},
+{
+ "calls: ld_abs with changing ctx data in callee",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_EXIT_INSN(),
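+	/* subprog: skb_vlan_push may change packet data, which the
+	 * verifier refuses to mix with direct LD_ABS/LD_IND
+	 */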
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
+ .result = REJECT,
+},
+{
+ "calls: two calls with bad fallthrough",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "not an exit",
+ .result = REJECT,
+},
+{
+ "calls: two calls with stack read",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+},
+{
+ "calls: two calls with stack write",
+ .insns = {
+ /* main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ /* write into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* read from stack frame of main prog */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+},
+{
+ "calls: stack overflow using two frames (pre-call access)",
+ .insns = {
+ /* prog 1 */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* prog 2 */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "combined stack size",
+ .result = REJECT,
+},
+{
+ "calls: stack overflow using two frames (post-call access)",
+ .insns = {
+ /* prog 1 */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+
+ /* prog 2 */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "combined stack size",
+ .result = REJECT,
+},
+{
+ "calls: stack depth check using three frames. test1",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=32, stack_A=256, stack_B=64
+ * and max(main+A, main+A+B) < 512
+ */
+ .result = ACCEPT,
+},
+{
+ "calls: stack depth check using three frames. test2",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=32, stack_A=64, stack_B=256
+ * and max(main+A, main+A+B) < 512
+ */
+ .result = ACCEPT,
+},
+{
+ "calls: stack depth check using three frames. test3",
+ .insns = {
+ /* main */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ /* B */
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=64, stack_A=224, stack_B=256
+ * and max(main+A, main+A+B) > 512
+ */
+ .errstr = "combined stack",
+ .result = REJECT,
+},
+{
+ "calls: stack depth check using three frames. test4",
+ /* void main(void) {
+ * func1(0);
+ * func1(1);
+ * func2(1);
+ * }
+ * void func1(int alloc_or_recurse) {
+ * if (alloc_or_recurse) {
+ * frame_pointer[-300] = 1;
+ * } else {
+ * func2(alloc_or_recurse);
+ * }
+ * }
+ * void func2(int alloc_or_recurse) {
+ * if (alloc_or_recurse) {
+ * frame_pointer[-300] = 1;
+ * }
+ * }
+ */
+ .insns = {
+ /* main */
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = REJECT,
+ .errstr = "combined stack",
+},
+{
+ "calls: stack depth check using three frames. test5",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
+ BPF_EXIT_INSN(),
+ /* C */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
+ BPF_EXIT_INSN(),
+ /* D */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
+ BPF_EXIT_INSN(),
+ /* E */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
+ BPF_EXIT_INSN(),
+ /* F */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
+ BPF_EXIT_INSN(),
+ /* G */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
+ BPF_EXIT_INSN(),
+ /* H */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "call stack",
+ .result = REJECT,
+},
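+/* Note: test5 stays well under the 512 byte stack cap; it is rejected
+ * because main plus eight nested subprogs exceeds the verifier's frame
+ * limit (MAX_CALL_FRAMES is 8), hence the "call stack" error rather
+ * than "combined stack".
+ */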
+{
+ "calls: spill into caller stack frame",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "cannot spill",
+ .result = REJECT,
+},
+{
+ "calls: write into caller stack frame",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "calls: write into callee stack frame",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "cannot return stack pointer",
+ .result = REJECT,
+},
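+/* The three tests above sketch the cross-frame pointer rules: a callee
+ * may write through a stack pointer handed to it by its caller (ACCEPT),
+ * but it may not spill a stack pointer into the caller's frame, and it
+ * may not return a pointer into its own frame, which is dead once the
+ * callee exits (both REJECT).
+ */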
+{
+ "calls: two calls with stack write and void return",
+ .insns = {
+ /* main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* write into stack frame of main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
+ BPF_EXIT_INSN(), /* void return */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+},
+{
+ "calls: ambiguous return value",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "allowed for root only",
+ .result_unpriv = REJECT,
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
+},
+{
+ "calls: two calls that return map_value",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+	/* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map_hash_8b = { 23 },
+ .result = ACCEPT,
+},
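+/* Note: the fixup_* arrays name the instruction indices where the test
+ * harness patches BPF_LD_MAP_FD with the fd of a freshly created map of
+ * the given type; here insn 23 is the BPF_LD_MAP_FD in subprog 2.
+ */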
+{
+ "calls: two calls that return map_value with bool condition",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+	/* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(), /* return 1 */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map_hash_8b = { 23 },
+ .result = ACCEPT,
+},
+{
+ "calls: two calls that return map_value with incorrect bool check",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+	/* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(), /* return 1 */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map_hash_8b = { 23 },
+ .result = REJECT,
+ .errstr = "invalid read from stack off -16+0 size 8",
+},
+{
+ "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=8 off=2 size=8",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = ACCEPT,
+},
+{
+ "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+	BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
+ BPF_JMP_IMM(BPF_JA, 0, 0, -30),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -8),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=8 off=2 size=8",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: two calls that receive map_value_ptr_or_null via arg. test1",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = ACCEPT,
+},
+{
+ "calls: two calls that receive map_value_ptr_or_null via arg. test2",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 0 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+},
+{
+ "calls: pkt_ptr spill into caller stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ /* now the pkt range is verified, read pkt_ptr from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = POINTER_VALUE,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+	/* Marking is still kept, but it is not safe in all cases. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ /* now the pkt range is verified, read pkt_ptr from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ /* Marking is still kept and safe here. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* now the pkt range is verified, read pkt_ptr from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+	/* Check that the marking propagated. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "same insn cannot be used with different",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R4 invalid mem access",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R4 invalid mem access",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 8",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 9",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
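+/* Rough summary of the pkt_ptr spill family: a pkt pointer spilled into
+ * the caller's stack is only usable on paths where its range check is
+ * known to have passed. Variants where the caller can reach the slot on
+ * an unchecked path (2, 9), or where the same load would see different
+ * pointer types on different paths (5), are rejected.
+ */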
+{
+ "calls: caller stack init to zero or map_value_or_null",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ /* fetch map_value_or_null or const_zero from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* store into map_value */
+ BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* if (ctx == 0) return; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
+ /* else bpf_map_lookup() and *(fp - 8) = r0 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 13 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "calls: stack init to zero and pruning",
+ .insns = {
+	/* first make allocated_stack 16 bytes */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+	/* now fork the execution such that the false branch
+	 * of the JGT insn is verified second and skips the zero
+	 * init of the fp-8 stack slot. If stack liveness marking
+	 * is missing the live_read marks from the map_lookup call
+	 * processing, pruning will incorrectly assume that the
+	 * fp-8 stack slot was unused in the fall-through branch
+	 * and will accept the program incorrectly.
+	 */
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 6 },
+ .errstr = "invalid indirect read from stack off -8+0 size 8",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "calls: ctx read at start of subprog",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "calls: cross frame pruning",
+ .insns = {
+ /* r8 = !!random();
+ * call pruner()
+ * if (r8)
+ * do something bad;
+ */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/cfg.c b/tools/testing/selftests/bpf/verifier/cfg.c
new file mode 100644
index 000000000000..349c0862fb4c
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/cfg.c
@@ -0,0 +1,70 @@
+{
+ "unreachable",
+ .insns = {
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unreachable",
+ .result = REJECT,
+},
+{
+ "unreachable2",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unreachable",
+ .result = REJECT,
+},
+{
+ "out of range jump",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "out of range jump2",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, -2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "loop (back-edge)",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "back-edge",
+ .result = REJECT,
+},
+{
+ "loop2 (back-edge)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "back-edge",
+ .result = REJECT,
+},
+{
+ "conditional loop",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "back-edge",
+ .result = REJECT,
+},
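+/* All of the loop tests above exercise the verifier's CFG check, which
+ * at this point rejects any back-edge outright (bounded loops are not
+ * supported), so even a conditional loop that would terminate fails
+ * with "back-edge".
+ */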
diff --git a/tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c b/tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c
new file mode 100644
index 000000000000..6d65fe3e7321
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c
@@ -0,0 +1,72 @@
+{
+ "bpf_exit with invalid return code. test1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 has value (0x0; 0xffffffff)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 has value (0x0; 0x3)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 has value (0x2; 0x0)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 is not a known value (ctx)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test7",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 has unknown scalar value",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
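+/* Reading the errstr patterns above: the verifier reports R0's range as
+ * a tnum "(value; mask)", where value holds the known bits and mask the
+ * unknown ones. CGROUP_SOCK programs must return 0 or 1, so any R0
+ * whose tnum admits values outside {0, 1} is rejected.
+ */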
diff --git a/tools/testing/selftests/bpf/verifier/cgroup_skb.c b/tools/testing/selftests/bpf/verifier/cgroup_skb.c
new file mode 100644
index 000000000000..52e4c03b076b
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/cgroup_skb.c
@@ -0,0 +1,197 @@
+{
+ "direct packet read test#1 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, queue_mapping)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, protocol)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_present)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "invalid bpf_context access off=76 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "direct packet read test#2 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_tci)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_proto)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, priority)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, priority)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, ingress_ifindex)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "direct packet read test#3 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "direct packet read test#4 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, family)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip4)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip4)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_port)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, local_port)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid access of tc_classid for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid access of data_meta for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data_meta)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid access of flow_keys for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, flow_keys)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid write access to napi_id for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "write tstamp from CGROUP_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "invalid bpf_context access off=152 size=8",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "read tstamp from CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
diff --git a/tools/testing/selftests/bpf/verifier/cgroup_storage.c b/tools/testing/selftests/bpf/verifier/cgroup_storage.c
new file mode 100644
index 000000000000..97057c0a1b8a
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/cgroup_storage.c
@@ -0,0 +1,220 @@
+{
+ "valid cgroup storage access",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "fd 1 is not pointing to valid bpf_map",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=256 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid cgroup storage access 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 7),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .errstr_unpriv = "R2 leaks addr into helper function",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "valid per-cpu cgroup storage access",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "fd 1 is not pointing to valid bpf_map",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=256 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid per-cpu cgroup storage access 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 7),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .errstr_unpriv = "R2 leaks addr into helper function",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
diff --git a/tools/testing/selftests/bpf/verifier/const_or.c b/tools/testing/selftests/bpf/verifier/const_or.c
new file mode 100644
index 000000000000..84446dfc7c1d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/const_or.c
@@ -0,0 +1,60 @@
+{
+ "constant register |= constant should keep constant type",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "constant register |= constant should not bypass stack boundary checks",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-48 access_size=58",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "constant register |= constant register should keep constant type",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_MOV64_IMM(BPF_REG_4, 13),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "constant register |= constant register should not bypass stack boundary checks",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_MOV64_IMM(BPF_REG_4, 24),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-48 access_size=58",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
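+/* The arithmetic behind the four tests above: bpf_probe_read() reads
+ * R2 bytes into the buffer at R1 = fp-48. 34 | 13 = 47 and still fits
+ * the 48-byte window, so those variants pass; 34 | 24 = 58 overruns it,
+ * hence "invalid stack type R1 off=-48 access_size=58".
+ */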
diff --git a/tools/testing/selftests/bpf/verifier/ctx.c b/tools/testing/selftests/bpf/verifier/ctx.c
new file mode 100644
index 000000000000..92762c08f5e3
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ctx.c
@@ -0,0 +1,93 @@
+{
+ "context stores via ST",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_ST stores into R1 ctx is not allowed",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "context stores via XADD",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
+ BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "arithmetic ops make PTR_TO_CTX unusable",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+ offsetof(struct __sk_buff, data) -
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "dereference of modified ctx ptr",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "pass unmodified ctx pointer to helper",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_csum_update),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "pass modified ctx pointer to helper, 1",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_csum_update),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "dereference of modified ctx ptr",
+},
+{
+ "pass modified ctx pointer to helper, 2",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_socket_cookie),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .errstr_unpriv = "dereference of modified ctx ptr",
+ .errstr = "dereference of modified ctx ptr",
+},
+{
+ "pass modified ctx pointer to helper, 3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_csum_update),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "variable ctx access var_off=(0x0; 0x4)",
+},
diff --git a/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c b/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c
new file mode 100644
index 000000000000..b0195770da6a
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c
@@ -0,0 +1,190 @@
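+/*
+ * struct sk_msg_md access checks: aligned 4-byte reads of the socket
+ * metadata fields are allowed, while oversized, out-of-range, or
+ * misaligned reads are rejected as invalid bpf_context access.
+ */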
+{
+ "valid access family in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, family)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access remote_ip4 in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access local_ip4 in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access remote_port in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_port)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access local_port in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_port)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access remote_ip6 in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip6[3])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access local_ip6 in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip6[3])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access size in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, size)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "invalid 64B read of size in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, size)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "invalid read past end of SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, size) + 4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "invalid read offset in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, family) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
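+/*
+ * Canonical direct packet access: load data and data_end from the ctx,
+ * range-check data plus the access length against data_end, then touch
+ * the payload only on the safe branch.
+ */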
+{
+ "direct packet read for SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, data)),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
+ offsetof(struct sk_msg_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "direct packet write for SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, data)),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
+ offsetof(struct sk_msg_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "overlapping checks for direct packet access SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, data)),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
+ offsetof(struct sk_msg_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c
new file mode 100644
index 000000000000..881f1c7f57a1
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c
@@ -0,0 +1,1046 @@
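+/*
+ * __sk_buff access checks: which fields are readable or writable depends
+ * on the program type, and each access must respect the field's size and
+ * alignment rules.
+ */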
+{
+ "access skb fields ok",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, queue_mapping)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, protocol)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_present)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_tci)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "access skb fields bad1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "access skb fields bad2",
+ .insns = {
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "different pointers",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "access skb fields bad3",
+ .insns = {
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -12),
+ },
+ .fixup_map_hash_8b = { 6 },
+ .errstr = "different pointers",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "access skb fields bad4",
+ .insns = {
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -13),
+ },
+ .fixup_map_hash_8b = { 7 },
+ .errstr = "different pointers",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff family",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, family)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff remote_ip4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff local_ip4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff remote_ip6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff local_ip6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff remote_port",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_port)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff remote_port",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_port)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "valid access __sk_buff family",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, family)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff remote_ip4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff local_ip4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff remote_ip6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[3])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff local_ip6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[3])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff remote_port",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_port)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff remote_port",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_port)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "invalid access of tc_classid for SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "invalid access of skb->mark for SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "check skb->mark is not writeable by SK_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "check skb->tc_index is writeable by SK_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "check skb->priority is writeable by SK_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, priority)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "direct packet read for SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "direct packet write for SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "overlapping checks for direct packet access SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "check skb->mark is not writeable by sockets",
+ .insns = {
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .errstr_unpriv = "R1 leaks addr",
+ .result = REJECT,
+},
+{
+ "check skb->tc_index is not writeable by sockets",
+ .insns = {
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .errstr_unpriv = "R1 leaks addr",
+ .result = REJECT,
+},
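+/*
+ * cb[0..4] is per-program scratch space, so it must be readable and
+ * writable at every access width and properly aligned offset.
+ */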
+{
+ "check cb access: byte",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 3),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "__sk_buff->hash, offset 0, byte store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, hash)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "__sk_buff->tc_index, offset 3, byte store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tc_index) + 3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
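+/*
+ * The __BYTE_ORDER conditionals pick the byte or half within the 4-byte
+ * hash word so that the narrow load exercises the same logical bits on
+ * little- and big-endian hosts.
+ */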
+{
+ "check skb->hash byte load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+#else
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash byte load permitted 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash byte load permitted 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash byte load permitted 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#else
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check cb access: byte, wrong type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "check cb access: half",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check cb access: half, unaligned",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check __sk_buff->hash, offset 0, half store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, hash)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check __sk_buff->tc_index, offset 2, half store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tc_index) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check skb->hash half load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 2),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash half load permitted 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 2),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash half load not permitted, unaligned 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 1),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check skb->hash half load not permitted, unaligned 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 1),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "check cb access: half, wrong type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "check cb access: word",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check cb access: word, unaligned 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: word, unaligned 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: word, unaligned 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: word, unaligned 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: double",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check cb access: double, unaligned 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: double, unaligned 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: double, oob 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check cb access: double, oob 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check __sk_buff->ifindex dw store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, ifindex)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check __sk_buff->ifindex dw load not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, ifindex)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check cb access: double, wrong type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "check out of range skb->cb access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 256),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .errstr_unpriv = "",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_ACT,
+},
+{
+ "write skb fields from socket prog",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .errstr_unpriv = "R1 leaks addr",
+ .result_unpriv = REJECT,
+},
+{
+ "write skb fields from tc_cls_act prog",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "check skb->data half load not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data) + 2),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "read gso_segs from CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, gso_segs)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "write gso_segs from CGROUP_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, gso_segs)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ .errstr = "invalid bpf_context access off=164 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "read gso_segs from CLS",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, gso_segs)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "check wire_len is not readable by sockets",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, wire_len)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check wire_len is readable by tc classifier",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, wire_len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "check wire_len is not writable by tc classifier",
+ .insns = {
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, wire_len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid bpf_context access",
+ .errstr_unpriv = "R1 leaks addr",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/dead_code.c b/tools/testing/selftests/bpf/verifier/dead_code.c
new file mode 100644
index 000000000000..50a8a63be4ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/dead_code.c
@@ -0,0 +1,169 @@
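+/*
+ * Dead code elimination: unreachable instructions at the start, middle,
+ * or end of a program (or an entire unused subprogram) must not cause a
+ * verifier failure, and the live path must return the expected value.
+ */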
+{
+ "dead code: start",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, -4),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: mid 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: mid 2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 4),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "dead code: end 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: end 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: end 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -5),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
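+/*
+ * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, off) with src_reg == 1
+ * (BPF_PSEUDO_CALL) is a bpf-to-bpf call; unprivileged loaders may not
+ * use subprograms at all, hence the _unpriv rejections below.
+ */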
+{
+ "dead code: tail of main + func",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: tail of main + two functions",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: function in the middle and mid of another func",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 7, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -5),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: middle of main before call",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 2, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "dead code: start of a function",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 2,
+},
diff --git a/tools/testing/selftests/bpf/verifier/direct_packet_access.c b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
new file mode 100644
index 000000000000..e3fc22e672c2
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
@@ -0,0 +1,648 @@
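+/*
+ * Direct packet access: arithmetic on pkt/pkt_end pointers is tightly
+ * constrained, and every payload load or store needs a bounds check
+ * against data_end that the verifier can prove on the taken path.
+ */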
+{
+ "pkt_end - pkt_start is allowed",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
+ BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access off=76",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+},
+{
+ "direct packet access: test4 (write)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test5 (pkt_end >= reg, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test6 (pkt_end >= reg, bad access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test7 (pkt_end >= reg, both accesses)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test8 (double test, variant 1)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test9 (double test, variant 2)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test10 (write invalid)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test11 (shift, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 144),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test12 (and, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 144),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test13 (branches, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 14),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 24),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
+ BPF_MOV64_IMM(BPF_REG_5, 12),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test15 (spill with xadd)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 4096),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 invalid mem access 'inv'",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test16 (arith on data_end)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 pointer arithmetic on pkt_end",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test17 (pruning, alignment)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_JMP_A(-6),
+ },
+ .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
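+/*
+ * Tests 18-24: adding a packet pointer into a scalar destination
+ * (imm/reg += pkt_ptr) must still yield a bounded packet pointer;
+ * unknown scalars need masking before the addition.
+ */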
+{
+ "direct packet access: test18 (imm += pkt_ptr, 1)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_IMM(BPF_REG_0, 8),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test19 (imm += pkt_ptr, 2)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ BPF_MOV64_IMM(BPF_REG_4, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
+ BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test20 (x += pkt_ptr, 1)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test21 (x += pkt_ptr, 2)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
+ BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test22 (x += pkt_ptr, 3)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
+ BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
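+/* test23 follows the same pattern but is rejected: with the 0xffff mask
+ * plus the added constants, the maximum possible packet offset exceeds
+ * the verifier's packet offset limit (MAX_PACKET_OFF), so the data_end
+ * comparison yields no usable range (the r=0 in the error message).
+ */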
+{
+ "direct packet access: test23 (x += pkt_ptr, 4)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test24 (x += pkt_ptr, 5)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 64),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
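+/* The "marking on <" and "marking on <=" tests check that packet pointer
+ * range marking also works for the BPF_JLT/BPF_JLE comparisons, with
+ * data_end on either side of the operand pair, on the taken branch
+ * (good access) as well as on the fall-through path (bad access).
+ */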
+{
+ "direct packet access: test25 (marking on <, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -4),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test26 (marking on <, bad access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test27 (marking on <=, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test28 (marking on <=, bad access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -4),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c b/tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c
new file mode 100644
index 000000000000..698e3779fdd2
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c
@@ -0,0 +1,40 @@
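+/* Each test adds two large positive immediates to a copy of the frame
+ * pointer so that the offset would wrap around if it were tracked in
+ * 32 bits; the verifier must keep 64-bit bounds and reject the
+ * resulting out-of-range stack pointer.
+ */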
+{
+ "direct stack access with 32-bit wraparound. test1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "fp pointer and 2147483647",
+ .result = REJECT
+},
+{
+ "direct stack access with 32-bit wraparound. test2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "fp pointer and 1073741823",
+ .result = REJECT
+},
+{
+ "direct stack access with 32-bit wraparound. test3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "fp pointer offset 1073741822",
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .result = REJECT
+},
diff --git a/tools/testing/selftests/bpf/verifier/div0.c b/tools/testing/selftests/bpf/verifier/div0.c
new file mode 100644
index 000000000000..7685edfbcf71
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/div0.c
@@ -0,0 +1,184 @@
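+/* Runtime semantics under test: eBPF defines division by zero to set
+ * the destination register to zero and modulo by zero to leave the
+ * destination unchanged, so all of these programs must pass the
+ * verifier and run to completion instead of faulting.
+ */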
+{
+ "DIV32 by 0, zero check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "DIV32 by 0, zero check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "DIV64 by 0, zero check",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "MOD32 by 0, zero check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "MOD32 by 0, zero check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "MOD64 by 0, zero check",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "DIV32 by 0, zero check ok, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 2),
+ BPF_MOV32_IMM(BPF_REG_2, 16),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 8,
+},
+{
+ "DIV32 by 0, zero check 1, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV32 by 0, zero check 2, cls",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV64 by 0, zero check, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "MOD32 by 0, zero check ok, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 3),
+ BPF_MOV32_IMM(BPF_REG_2, 5),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "MOD32 by 0, zero check 1, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "MOD32 by 0, zero check 2, cls",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "MOD64 by 0, zero check 1, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 2),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "MOD64 by 0, zero check 2, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, -1),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = -1,
+},
diff --git a/tools/testing/selftests/bpf/verifier/div_overflow.c b/tools/testing/selftests/bpf/verifier/div_overflow.c
new file mode 100644
index 000000000000..bd3f38dbe796
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/div_overflow.c
@@ -0,0 +1,104 @@
+/* Just make sure that JITs use udiv/umod; otherwise we would get an
+ * exception from INT_MIN/-1 overflow, similar to the one from division
+ * by zero.
+ */
+{
+ "DIV32 overflow, check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV32 overflow, check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV64 overflow, check 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
+ BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV64 overflow, check 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
+ BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
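+/* For the MOD cases, unsigned umod computes e.g. 0x80000000 % 0xffffffff,
+ * whose remainder is the dividend itself, hence the INT_MIN retval here
+ * and the unchanged destination in the 64-bit variants below.
+ */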
+{
+ "MOD32 overflow, check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = INT_MIN,
+},
+{
+ "MOD32 overflow, check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = INT_MIN,
+},
+{
+ "MOD64 overflow, check 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "MOD64 overflow, check 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
diff --git a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
new file mode 100644
index 000000000000..1f39d845c64f
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
@@ -0,0 +1,614 @@
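+/* These tests pass a stack or map buffer plus a variable size to helpers
+ * such as bpf_probe_read() and check that the verifier accepts the call
+ * only when the size register has been bounded (via AND masks and/or
+ * conditional jumps) to fit the buffer, and when every byte the helper
+ * may read has been initialized.
+ */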
+{
+ "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, bitwise AND, zero included",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid indirect read from stack off -64+0 size 64",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-64 access_size=65",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP (signed), correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, bounds + offset",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-64 access_size=65",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-64 access_size=65",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, no max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+	/* because the max was never checked, the signed min can be negative */
+ .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, no min check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid indirect read from stack off -64+0 size 64",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP (signed), no min check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: map, JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val), 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: map, JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) + 1, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=0 size=49",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: map adjusted, JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 20, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: map adjusted, JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 19, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R1 min value is outside of the array range",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
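+/* ARG_PTR_TO_MEM_OR_NULL semantics: a NULL argument is acceptable only
+ * together with size 0; any size that can be non-zero requires a valid
+ * pointer. bpf_csum_diff() takes such arguments, bpf_probe_read() does
+ * not, which the !ARG_PTR_TO_MEM_OR_NULL tests further below rely on.
+ */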
+{
+ "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 type=inv expected=fp",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 0 /* csum_diff of 64-byte packet */,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 type=inv expected=fp",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 type=inv expected=fp",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
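+/* The stack slot at fp-32 is deliberately left uninitialized below, so
+ * a variable-length read of up to 64 bytes could leak its contents; the
+ * verifier must report the uninitialized range (off -64+32).
+ */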
+{
+ "helper access to variable memory: 8 bytes leak",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid indirect read from stack off -64+32 size 64",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: 8 bytes no leak (init memory)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/helper_packet_access.c b/tools/testing/selftests/bpf/verifier/helper_packet_access.c
new file mode 100644
index 000000000000..ae54587e9829
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/helper_packet_access.c
@@ -0,0 +1,460 @@
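+/* Helpers may touch packet memory only after the program has shown, via
+ * a data/data_end comparison, that the accessed range lies within the
+ * packet. The XDP and cls variants below cover checked, unchecked,
+ * variable and out-of-range packet pointers.
+ */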
+{
+ "helper access to packet: test1, valid packet_ptr range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 5 },
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test2, unchecked packet_ptr",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test3, variable add",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
+ BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 11 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test4, packet_ptr with bad range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 7 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test5, packet_ptr with too short range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 6 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test6, cls valid packet_ptr range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 5 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test7, cls unchecked packet_ptr",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test8, cls variable add",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
+ BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 11 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test9, cls packet_ptr with bad range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 7 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test10, cls packet_ptr with too short range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 6 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
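+/* skb_store_bytes() and skb_load_bytes() may move packet data around,
+ * so direct packet pointers are not acceptable memory arguments for
+ * them; the verifier answers with "helper access to the packet".
+ */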
+{
+ "helper access to packet: test11, cls unsuitable helper 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 42),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_store_bytes),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "helper access to the packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test12, cls unsuitable helper 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "helper access to the packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test13, cls helper ok",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test14, cls helper ok sub",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test15, cls helper fail sub",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test16, cls helper fail range 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test17, cls helper fail range 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, -9),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R2 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test18, cls helper fail range 3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, ~0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R2 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test19, cls helper range zero",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test20, pkt end as input",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R1 type=pkt_end expected=fp",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test21, wrong reg",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/helper_value_access.c b/tools/testing/selftests/bpf/verifier/helper_value_access.c
new file mode 100644
index 000000000000..7572e403ddb9
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/helper_value_access.c
@@ -0,0 +1,953 @@
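+/* Helper access to map values: the size argument is validated against
+ * the map's value_size, both for pointers returned directly by
+ * map_lookup_elem() and for pointers adjusted by constant immediates
+ * or constant registers.
+ */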
+{
+ "helper access to map: full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=0 size=0",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: out-of-bound range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=0 size=56",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: negative range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) - offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=0",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): out-of-bound range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) - offsetof(struct test_val, foo) + 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=52",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): negative range (> adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): negative range (< adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) - offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R1 min value is outside of the array range",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): out-of-bound range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) -
+ offsetof(struct test_val, foo) + 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=52",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): negative range (> adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): negative range (< adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) - offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R1 min value is outside of the array range",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): no max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R1 unbounded memory access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): wrong max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) -
+ offsetof(struct test_val, foo) + 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=45",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using <, good access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using <, bad access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 unbounded memory access",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using <=, good access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using <=, bad access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 unbounded memory access",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<, good access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<, good access 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<, bad access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 min value is negative",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<=, good access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<=, good access 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<=, bad access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 min value is negative",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map lookup helper access to map",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 8 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map update helper access to map",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map update helper access to map: wrong size",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .fixup_map_hash_16b = { 10 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=8 off=0 size=16",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const imm)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, offsetof(struct other_val, bar)),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 9 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const imm): out-of-bound 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, sizeof(struct other_val) - 4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 9 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=12 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const imm): out-of-bound 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 9 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const reg)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct other_val, bar)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const reg): out-of-bound 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, sizeof(struct other_val) - 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=12 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const reg): out-of-bound 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, -4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via variable)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct other_val, bar), 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 11 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via variable): no max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = REJECT,
+ .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via variable): wrong max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct other_val, bar) + 1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 11 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=9 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
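The map-helper cases above all share one skeleton: zero an 8-byte key on the stack, call map_lookup_elem, null-check r0, then hand an adjusted value pointer to a second helper. As a rough restricted-C sketch of the "full range" case — the map name and function wrapper are illustrative, and the 48-byte struct test_val layout is inferred from the value_size=48 off=4 error strings:

	struct test_val {
		unsigned int index;
		int foo[11];	/* 4 + 44 bytes, so offsetof(foo) == 4 */
	};

	int probe_full_range(void *ctx)
	{
		long key = 0;
		struct test_val *v = bpf_map_lookup_elem(&map, &key);

		if (v)	/* the verifier demands this NULL check first */
			bpf_probe_read(v->foo,
				       sizeof(*v) - offsetof(struct test_val, foo),
				       NULL);
		return 0;
	}

The REJECT variants differ only in the size handed to bpf_probe_read: zero, a negative value, or one that runs past the end of the map value.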
diff --git a/tools/testing/selftests/bpf/verifier/jit.c b/tools/testing/selftests/bpf/verifier/jit.c
new file mode 100644
index 000000000000..be488b4495a3
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/jit.c
@@ -0,0 +1,88 @@
+{
+ "jit: lsh, rsh, arsh by 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 0xff),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
+ BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jit: mov32 for ldimm64, 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
+ BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jit: mov32 for ldimm64, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
+ BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jit: various mul tests",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
+ BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
+ BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
+ BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
+ BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
+ BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
+ BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
+ BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
+ BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
+ BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
+ BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
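The mul cases pin down a JIT corner: BPF_ALU64 multiplies the full 64-bit registers, while BPF_ALU32 truncates operands and result to 32 bits and zero-extends the destination. A host-side sketch of the arithmetic the expected constants encode (constants copied from the test above):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t a = 0xfefefeULL, b = 0xefefefULL;

		/* BPF_ALU64 | BPF_MUL: full product, 0xeeff0d413122 */
		printf("%llx\n", (unsigned long long)(a * b));

		/* BPF_ALU32 | BPF_MUL: low 32 bits only, zero-extended;
		 * this is what the BPF_MOV32_REG(R2, R2) truncation in
		 * the test compares against */
		printf("%llx\n",
		       (unsigned long long)(uint32_t)((uint32_t)a * (uint32_t)b));
		return 0;
	}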
diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c
new file mode 100644
index 000000000000..ceb39ffa0e88
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/jmp32.c
@@ -0,0 +1,724 @@
+{
+ "jset32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ /* reg, high bits shouldn't be tested */
+ BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, -2, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_EXIT_INSN(),
+
+ BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 1ULL << 63, }
+ },
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ { .retval = 2,
+ .data64 = { 1ULL << 63 | 1, }
+ },
+ },
+},
+{
+ "jset32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_LD_IMM64(BPF_REG_8, 0x8000000000000000),
+ BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_EXIT_INSN(),
+
+ BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
+ BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 1ULL << 63, }
+ },
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ { .retval = 2,
+ .data64 = { 1ULL << 63 | 1, }
+ },
+ },
+},
+{
+ "jset32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, 0x10, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x10, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "jeq32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 2,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { -2, }
+ },
+ { .retval = 2,
+ .data64 = { -1, }
+ },
+ },
+},
+{
+ "jeq32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_LD_IMM64(BPF_REG_8, 0x7000000000000001),
+ BPF_JMP32_REG(BPF_JEQ, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 2, }
+ },
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ { .retval = 2,
+ .data64 = { 1ULL << 63 | 1, }
+ },
+ },
+},
+{
+ "jeq32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_7, 0x10, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JSGE, BPF_REG_7, 0xf, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "jne32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JNE, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 2,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ { .retval = 0,
+ .data64 = { -1, }
+ },
+ },
+},
+{
+ "jne32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
+ BPF_JMP32_REG(BPF_JNE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 1, }
+ },
+ { .retval = 2,
+ .data64 = { 2, }
+ },
+ { .retval = 2,
+ .data64 = { 1ULL << 63 | 2, }
+ },
+ },
+},
+{
+ "jne32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP32_IMM(BPF_JNE, BPF_REG_7, 0x10, 1),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x10, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "jge32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, UINT_MAX - 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 2,
+ .data64 = { UINT_MAX - 1, }
+ },
+ { .retval = 0,
+ .data64 = { 0, }
+ },
+ },
+},
+{
+ "jge32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, UINT_MAX | 1ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JGE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { INT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { (UINT_MAX - 1) | 2ULL << 32, }
+ },
+ },
+},
+{
+ "jge32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JGE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jgt32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JGT, BPF_REG_7, UINT_MAX - 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX - 1, }
+ },
+ { .retval = 0,
+ .data64 = { 0, }
+ },
+ },
+},
+{
+ "jgt32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, (UINT_MAX - 1) | 1ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX - 1, }
+ },
+ { .retval = 0,
+ .data64 = { (UINT_MAX - 1) | 2ULL << 32, }
+ },
+ },
+},
+{
+ "jgt32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jle32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JLE, BPF_REG_7, INT_MAX, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { INT_MAX - 1, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 2,
+ .data64 = { INT_MAX, }
+ },
+ },
+},
+{
+ "jle32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, (INT_MAX - 1) | 2ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JLE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { INT_MAX | 1ULL << 32, }
+ },
+ { .retval = 2,
+ .data64 = { INT_MAX - 2, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX, }
+ },
+ },
+},
+{
+ "jle32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JLE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JLE, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jlt32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JLT, BPF_REG_7, INT_MAX, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { INT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 2,
+ .data64 = { INT_MAX - 1, }
+ },
+ },
+},
+{
+ "jlt32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, INT_MAX | 2ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JLT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { INT_MAX | 1ULL << 32, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 2,
+ .data64 = { (INT_MAX - 1) | 3ULL << 32, }
+ },
+ },
+},
+{
+ "jlt32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JLT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jsge32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JSGE, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { 0, }
+ },
+ { .retval = 2,
+ .data64 = { -1, }
+ },
+ { .retval = 0,
+ .data64 = { -2, }
+ },
+ },
+},
+{
+ "jsge32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, (__u32)-1 | 2ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JSGE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { -1, }
+ },
+ { .retval = 2,
+ .data64 = { 0x7fffffff | 1ULL << 32, }
+ },
+ { .retval = 0,
+ .data64 = { -2, }
+ },
+ },
+},
+{
+ "jsge32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JSGE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jsgt32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JSGT, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { (__u32)-2, }
+ },
+ { .retval = 0,
+ .data64 = { -1, }
+ },
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ },
+},
+{
+ "jsgt32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffffe | 1ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JSGT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 0x7ffffffe, }
+ },
+ { .retval = 0,
+ .data64 = { 0x1ffffffffULL, }
+ },
+ { .retval = 2,
+ .data64 = { 0x7fffffff, }
+ },
+ },
+},
+{
+ "jsgt32: min/max deduction",
+ .insns = {
+ BPF_RAND_SEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, (__u32)(-2) | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JSGT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, -2, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jsle32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JSLE, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { (__u32)-2, }
+ },
+ { .retval = 2,
+ .data64 = { -1, }
+ },
+ { .retval = 0,
+ .data64 = { 1, }
+ },
+ },
+},
+{
+ "jsle32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffffe | 1ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JSLE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { 0x7ffffffe, }
+ },
+ { .retval = 2,
+ .data64 = { (__u32)-1, }
+ },
+ { .retval = 0,
+ .data64 = { 0x7fffffff | 2ULL << 32, }
+ },
+ },
+},
+{
+ "jsle32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JSLE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jslt32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JSLT, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { (__u32)-2, }
+ },
+ { .retval = 0,
+ .data64 = { -1, }
+ },
+ { .retval = 0,
+ .data64 = { 1, }
+ },
+ },
+},
+{
+ "jslt32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, 0x7fffffff | 1ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JSLT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { 0x7ffffffe, }
+ },
+ { .retval = 2,
+ .data64 = { 0xffffffff, }
+ },
+ { .retval = 0,
+ .data64 = { 0x7fffffff | 2ULL << 32, }
+ },
+ },
+},
+{
+ "jslt32: min/max deduction",
+ .insns = {
+ BPF_RAND_SEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, (__u32)(-1) | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JSLT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JSLT, BPF_REG_7, -1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
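Every jmp32 case above probes the same contract from a different operation: a BPF_JMP32 conditional compares only the low 32 bits of its operands, so a set bit in the upper word (the 1ULL << 63 and NULL << 32 payloads) must not flip the branch, and the min/max cases check that the verifier deduces 32-bit bounds from such a branch. A hedged C model of two of the BPF_K forms:

	#include <stdint.h>

	/* BPF_JMP32 | BPF_JEQ | BPF_K: unsigned compare on the low word */
	static int jeq32(uint64_t dst, int32_t imm)
	{
		return (uint32_t)dst == (uint32_t)imm;
	}

	/* BPF_JMP32 | BPF_JSGT | BPF_K: signed compare on the low word */
	static int jsgt32(uint64_t dst, int32_t imm)
	{
		return (int32_t)dst > imm;
	}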
diff --git a/tools/testing/selftests/bpf/verifier/jset.c b/tools/testing/selftests/bpf/verifier/jset.c
new file mode 100644
index 000000000000..7e14037acfaf
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/jset.c
@@ -0,0 +1,165 @@
+{
+ "jset: functional",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+
+ /* reg, bit 63 or bit 0 set, taken */
+ BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
+ BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+
+ /* reg, bit 62, not taken */
+ BPF_LD_IMM64(BPF_REG_8, 0x4000000000000000),
+ BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* imm, any bit set, taken */
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+
+ /* imm, bit 31 set, taken */
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
+ BPF_EXIT_INSN(),
+
+ /* all good - return r0 == 2 */
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 7,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { (1ULL << 63) | (1U << 31) | (1U << 0), }
+ },
+ { .retval = 2,
+ .data64 = { (1ULL << 63) | (1U << 31), }
+ },
+ { .retval = 2,
+ .data64 = { (1ULL << 31) | (1U << 0), }
+ },
+ { .retval = 2,
+ .data64 = { (__u32)-1, }
+ },
+ { .retval = 2,
+ .data64 = { ~0x4000000000000000ULL, }
+ },
+ { .retval = 0,
+ .data64 = { 0, }
+ },
+ { .retval = 0,
+ .data64 = { ~0ULL, }
+ },
+ },
+},
+{
+ "jset: sign-extend",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
+ BPF_EXIT_INSN(),
+
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2,
+ .data = { 1, 0, 0, 0, 0, 0, 0, 1, },
+},
+{
+ "jset: known const compare",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .retval_unpriv = 1,
+ .result_unpriv = ACCEPT,
+ .retval = 1,
+ .result = ACCEPT,
+},
+{
+ "jset: known const compare bad",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "!read_ok",
+ .result_unpriv = REJECT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "jset: unknown const compare taken",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "!read_ok",
+ .result_unpriv = REJECT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "jset: unknown const compare not taken",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "!read_ok",
+ .result_unpriv = REJECT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "jset: half-known const compare",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+},
+{
+ "jset: range",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0xf0, 3),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+},
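jset branches when (dst & src) != 0. The "sign-extend" case exists because, in the 64-bit BPF_JMP class, the 32-bit immediate is sign-extended before the AND, so imm 0x80000000 also tests the upper half of the register. A C model of both forms, as a sketch:

	#include <stdint.h>

	/* BPF_JMP | BPF_JSET | BPF_K: imm is sign-extended to 64 bits */
	static int jset64_imm(uint64_t dst, int32_t imm)
	{
		return (dst & (uint64_t)(int64_t)imm) != 0;
	}

	/* BPF_JMP | BPF_JSET | BPF_X: plain 64-bit AND */
	static int jset64_reg(uint64_t dst, uint64_t src)
	{
		return (dst & src) != 0;
	}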
diff --git a/tools/testing/selftests/bpf/verifier/jump.c b/tools/testing/selftests/bpf/verifier/jump.c
new file mode 100644
index 000000000000..8e6fcc8940f0
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/jump.c
@@ -0,0 +1,180 @@
+{
+ "jump test 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "jump test 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 14),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 11),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 5),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "jump test 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 19),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 15),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 11),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 7),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 24 },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = -ENOENT,
+},
+{
+ "jump test 4",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "jump test 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
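The jump tests are less about the compares than about path explosion: each chain makes the verifier walk both outcomes of every branch and lean on state pruning, and under unprivileged load any compare against the ctx pointer in R1 trips "R1 pointer comparison". Roughly, "jump test 1" encodes a chain like this hedged C analogue (not the literal program):

	/* every compare skips one stack store when it matches, so both
	 * paths must be tracked at each step */
	void chain(long r1, char *fp)
	{
		if (r1 != 0) *(long *)(fp - 8)  = 0;
		if (r1 != 1) *(long *)(fp - 16) = 1;
		if (r1 != 2) *(long *)(fp - 8)  = 2;
		if (r1 != 3) *(long *)(fp - 16) = 3;
		if (r1 != 4) *(long *)(fp - 8)  = 4;
		if (r1 != 5) *(long *)(fp - 32) = 5;
	}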
diff --git a/tools/testing/selftests/bpf/verifier/junk_insn.c b/tools/testing/selftests/bpf/verifier/junk_insn.c
new file mode 100644
index 000000000000..89d690f1992a
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/junk_insn.c
@@ -0,0 +1,45 @@
+{
+ "junk insn",
+ .insns = {
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode 00",
+ .result = REJECT,
+},
+{
+ "junk insn2",
+ .insns = {
+ BPF_RAW_INSN(1, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_LDX uses reserved fields",
+ .result = REJECT,
+},
+{
+ "junk insn3",
+ .insns = {
+ BPF_RAW_INSN(-1, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode ff",
+ .result = REJECT,
+},
+{
+ "junk insn4",
+ .insns = {
+ BPF_RAW_INSN(-1, -1, -1, -1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode ff",
+ .result = REJECT,
+},
+{
+ "junk insn5",
+ .insns = {
+ BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_ALU uses reserved fields",
+ .result = REJECT,
+},
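The junk-insn cases poke at opcode decode order: the low three bits of the opcode byte select the instruction class, so 0x00 and 0xff are rejected as unknown opcodes, while 0x01 (BPF_LDX) and 0x7f (which decodes to a valid ALU64 shift with a BPF_X source) get far enough to fail on their reserved fields instead. The relevant field extractors, as in include/uapi/linux/bpf_common.h:

	#define BPF_CLASS(code)	((code) & 0x07)	/* 0x00 LD ... 0x07 ALU64 */
	#define BPF_OP(code)	((code) & 0xf0)	/* ALU/JMP operation */
	#define BPF_SRC(code)	((code) & 0x08)	/* BPF_K or BPF_X */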
diff --git a/tools/testing/selftests/bpf/verifier/ld_abs.c b/tools/testing/selftests/bpf/verifier/ld_abs.c
new file mode 100644
index 000000000000..f6599d2ec22d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ld_abs.c
@@ -0,0 +1,286 @@
+{
+ "ld_abs: check calling conv, r1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R4 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R5 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r7",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_7, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "ld_abs: tests on r6 and skb data reload helper",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 42 /* ultimate return value */,
+},
+{
+ "ld_abs: invalid op 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_DW, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "unknown opcode",
+},
+{
+ "ld_abs: invalid op 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 256),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "unknown opcode",
+},
+{
+ "ld_abs: nmap reduced",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_H, 12),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
+ BPF_LD_ABS(BPF_H, 12),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
+ BPF_MOV32_IMM(BPF_REG_0, 18),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
+ BPF_LD_IND(BPF_W, BPF_REG_7, 14),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
+ BPF_MOV32_IMM(BPF_REG_0, 280971478),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
+ BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
+ BPF_LD_ABS(BPF_H, 12),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
+ BPF_MOV32_IMM(BPF_REG_0, 22),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
+ BPF_LD_IND(BPF_H, BPF_REG_7, 14),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
+ BPF_MOV32_IMM(BPF_REG_0, 17366),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
+ BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV32_IMM(BPF_REG_0, 256),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 256,
+},
+{
+ "ld_abs: div + abs, test 1",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 3),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
+ BPF_LD_ABS(BPF_B, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+ BPF_LD_IND(BPF_B, BPF_REG_8, -70),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 10, 20, 30, 40, 50,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 10,
+},
+{
+ "ld_abs: div + abs, test 2",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 3),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
+ BPF_LD_ABS(BPF_B, 128),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+ BPF_LD_IND(BPF_B, BPF_REG_8, -70),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 10, 20, 30, 40, 50,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "ld_abs: div + abs, test 3",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
+ BPF_LD_ABS(BPF_B, 3),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 10, 20, 30, 40, 50,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "ld_abs: div + abs, test 4",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
+ BPF_LD_ABS(BPF_B, 256),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 10, 20, 30, 40, 50,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "ld_abs: vlan + abs, test 1",
+ .insns = { },
+ .data = {
+ 0x34,
+ },
+ .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0xbef,
+},
+{
+ "ld_abs: vlan + abs, test 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_skb_vlan_push),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 0x34,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "ld_abs: jump around ld_abs",
+ .insns = { },
+ .data = {
+ 10, 11,
+ },
+ .fill_helper = bpf_fill_jump_around_ld_abs,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 10,
+},
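
The calling-convention cases above encode one rule: the legacy LD_ABS/LD_IND instructions take the skb implicitly from R6, return their result in R0, and clobber R1-R5 as scratch, so only R6-R9 survive the load (R7 passes where R1-R5 fail with "!read_ok"). A rough C model of LD_ABS | BPF_W under those semantics; this is illustrative only, not the in-kernel implementation, with pkt/pkt_len standing in for the skb:

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    /* Model of LD_ABS|BPF_W: out-of-range offsets abort the program
     * with r0 = 0, otherwise R0 receives the word in host byte order
     * (the big-endian load is cBPF heritage).
     */
    static int ld_abs_w(const uint8_t *pkt, uint32_t pkt_len,
                        int32_t off, uint32_t *r0)
    {
        uint32_t raw;

        if (off < 0 || (uint32_t)off + sizeof(raw) > pkt_len)
            return -1;
        memcpy(&raw, pkt + off, sizeof(raw));
        *r0 = ntohl(raw);
        return 0;
    }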
diff --git a/tools/testing/selftests/bpf/verifier/ld_dw.c b/tools/testing/selftests/bpf/verifier/ld_dw.c
new file mode 100644
index 000000000000..d2c75b889598
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ld_dw.c
@@ -0,0 +1,36 @@
+{
+ "ld_dw: xor semi-random 64 bit imms, test 1",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 4090,
+},
+{
+ "ld_dw: xor semi-random 64 bit imms, test 2",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2047,
+},
+{
+ "ld_dw: xor semi-random 64 bit imms, test 3",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 511,
+},
+{
+ "ld_dw: xor semi-random 64 bit imms, test 4",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 5,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ld_imm64.c b/tools/testing/selftests/bpf/verifier/ld_imm64.c
new file mode 100644
index 000000000000..28b8c805a293
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ld_imm64.c
@@ -0,0 +1,141 @@
+{
+ "test1 ld_imm64",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid BPF_LD_IMM insn",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "test2 ld_imm64",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid BPF_LD_IMM insn",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "test3 ld_imm64",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test4 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test5 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test6 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "test7 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, 0, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "test8 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
+ BPF_RAW_INSN(0, 0, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "uses reserved fields",
+ .result = REJECT,
+},
+{
+ "test9 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, 0, 0, 1, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test10 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test11 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test12 ld_imm64",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
+ BPF_RAW_INSN(0, 0, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "not pointing to valid bpf_map",
+ .result = REJECT,
+},
+{
+ "test13 ld_imm64",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
+ BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
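
These cases probe the only 16-byte eBPF instruction: BPF_LD | BPF_IMM | BPF_DW spans two bpf_insn slots, with the second slot's imm carrying the upper 32 bits and all of its other fields required to be zero, which is exactly what tests 6 through 13 poke at. The BPF_LD_IMM64 macro expands roughly as in tools/include/linux/filter.h:

    #define BPF_LD_IMM64(DST, IMM)                      \
        BPF_LD_IMM64_RAW(DST, 0, IMM)

    #define BPF_LD_IMM64_RAW(DST, SRC, IMM)             \
        ((struct bpf_insn) {                            \
            .code    = BPF_LD | BPF_DW | BPF_IMM,       \
            .dst_reg = DST,                             \
            .src_reg = SRC,                             \
            .off     = 0,                               \
            .imm     = (__u32) (IMM) }),                \
        ((struct bpf_insn) {                            \
            .code    = 0, /* zero is reserved opcode */ \
            .dst_reg = 0,                               \
            .src_reg = 0,                               \
            .off     = 0,                               \
            .imm     = ((__u64) (IMM)) >> 32 })

A nonzero src_reg in the first slot is legal (it selects pseudo-map loads such as BPF_LD_MAP_FD), but only with a valid map fd in imm, hence test12's "not pointing to valid bpf_map".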
diff --git a/tools/testing/selftests/bpf/verifier/ld_ind.c b/tools/testing/selftests/bpf/verifier/ld_ind.c
new file mode 100644
index 000000000000..079734227538
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ld_ind.c
@@ -0,0 +1,72 @@
+{
+ "ld_ind: check calling conv, r1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R4 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R5 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r7",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_7, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
diff --git a/tools/testing/selftests/bpf/verifier/leak_ptr.c b/tools/testing/selftests/bpf/verifier/leak_ptr.c
new file mode 100644
index 000000000000..d6eec17f2cd2
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/leak_ptr.c
@@ -0,0 +1,67 @@
+{
+ "leak pointer into ctx 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 2 },
+ .errstr_unpriv = "R2 leaks addr into mem",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+},
+{
+ "leak pointer into ctx 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R10 leaks addr into mem",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+},
+{
+ "leak pointer into ctx 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .errstr_unpriv = "R2 leaks addr into ctx",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "leak pointer into map val",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr_unpriv = "R6 leaks addr into mem",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
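
Two checks interact in these cases: BPF_STX_XADD, an atomic read-modify-write, is never allowed on ctx memory (ctx accesses are rewritten field by field), and on unprivileged loads any instruction that makes a pointer value observable in readable memory is flagged as an address leak. A minimal sketch of C source that clang lowers to BPF_XADD, assuming the newer libbpf map syntax and a hypothetical map named "vals":

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u64);
    } vals SEC(".maps");

    SEC("socket")
    int xadd_example(struct __sk_buff *skb)
    {
        __u32 key = 0;
        __u64 *val = bpf_map_lookup_elem(&vals, &key);

        if (!val)
            return 0;
        /* __sync_fetch_and_add() becomes BPF_STX | BPF_XADD; fine on
         * map-value memory, rejected on skb/ctx memory as above.
         */
        __sync_fetch_and_add(val, 1);
        return 0;
    }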
diff --git a/tools/testing/selftests/bpf/verifier/lwt.c b/tools/testing/selftests/bpf/verifier/lwt.c
new file mode 100644
index 000000000000..2cab6a3966bb
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/lwt.c
@@ -0,0 +1,189 @@
+{
+ "invalid direct packet write for LWT_IN",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "cannot write into packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "invalid direct packet write for LWT_OUT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "cannot write into packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_OUT,
+},
+{
+ "direct packet write for LWT_XMIT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+},
+{
+ "direct packet read for LWT_IN",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "direct packet read for LWT_OUT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_OUT,
+},
+{
+ "direct packet read for LWT_XMIT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+},
+{
+ "overlapping checks for direct packet access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+},
+{
+ "make headroom for LWT_XMIT",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+ /* split for s390 to succeed */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 42),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+},
+{
+ "invalid access of tc_classid for LWT_IN",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "invalid access of tc_classid for LWT_OUT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "invalid access of tc_classid for LWT_XMIT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "check skb->tc_classid half load not permitted for lwt prog",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid) + 2),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
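
The packet cases above are hand-assembled variants of the canonical direct packet access idiom, with the program type deciding whether the final store is legal: LWT_IN and LWT_OUT are read-only, LWT_XMIT may also write. The same shape in C, as a sketch:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("lwt_xmit")
    int dpa_example(struct __sk_buff *skb)
    {
        void *data     = (void *)(long)skb->data;
        void *data_end = (void *)(long)skb->data_end;

        if (data + 8 > data_end)    /* the BPF_JGT guard above */
            return BPF_OK;
        *(__u8 *)data = 0;          /* write: LWT_XMIT only */
        return BPF_OK;
    }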
diff --git a/tools/testing/selftests/bpf/verifier/map_in_map.c b/tools/testing/selftests/bpf/verifier/map_in_map.c
new file mode 100644
index 000000000000..2798927ee9ff
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/map_in_map.c
@@ -0,0 +1,62 @@
+{
+ "map in map access",
+ .insns = {
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_in_map = { 3 },
+ .result = ACCEPT,
+},
+{
+ "invalid inner map pointer",
+ .insns = {
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_in_map = { 3 },
+ .errstr = "R1 pointer arithmetic on map_ptr prohibited",
+ .result = REJECT,
+},
+{
+ "forgot null checking on the inner map pointer",
+ .insns = {
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_in_map = { 3 },
+ .errstr = "R1 type=map_value_or_null expected=map_ptr",
+ .result = REJECT,
+},
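
The outer lookup of a map-in-map returns map_value_or_null holding an inner map pointer, so it must be null-checked before it can serve as the map argument of the second lookup, and no arithmetic on it is allowed; the three cases above cover the good path and both mistakes. The accepted flow in C, sketched with the newer libbpf declaration syntax (the names "outer" and "inner" are placeholders):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct inner {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u32);
    };

    struct {
        __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
        __uint(max_entries, 1);
        __type(key, __u32);
        __array(values, struct inner);
    } outer SEC(".maps");

    SEC("socket")
    int map_in_map_example(void *ctx)
    {
        __u32 key = 0;
        void *in = bpf_map_lookup_elem(&outer, &key);

        if (!in)    /* skipping this check is the third, rejected case */
            return 0;
        bpf_map_lookup_elem(in, &key);
        return 0;
    }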
diff --git a/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c
new file mode 100644
index 000000000000..cd26ee6b7b1d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c
@@ -0,0 +1,100 @@
+{
+ "calls: two calls returning different map pointers for lookup (hash, array)",
+ .insns = {
+ /* main prog */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_CALL_REL(11),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_CALL_REL(12),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ /* subprog 1 */
+ BPF_LD_MAP_FD(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* subprog 2 */
+ BPF_LD_MAP_FD(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_48b = { 13 },
+ .fixup_map_array_48b = { 16 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "calls: two calls returning different map pointers for lookup (hash, map in map)",
+ .insns = {
+ /* main prog */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_CALL_REL(11),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_CALL_REL(12),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ /* subprog 1 */
+ BPF_LD_MAP_FD(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* subprog 2 */
+ BPF_LD_MAP_FD(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_in_map = { 16 },
+ .fixup_map_array_48b = { 13 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'map_ptr'",
+},
+{
+ "cond: two branches returning different map pointers for lookup (tail, tail)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 5 },
+ .fixup_prog2 = { 2 },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "tail_call abusing map_ptr",
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "cond: two branches returning same map pointers for lookup (tail, tail)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog2 = { 2, 5 },
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+ .retval = 42,
+},
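
For the two tail-call cases the verifier must prove that whatever reaches bpf_tail_call() in R2 is a known prog array; privileged loads tolerate two branches supplying different (valid) arrays, while unprivileged loads reject that as "tail_call abusing map_ptr". In C the pattern is simply the following, assuming a prog array named "jmp_table"; the retval of 42 comes from the program the fixup installs in the array, not from this function:

    SEC("tc")
    int tail_caller(struct __sk_buff *skb)
    {
        bpf_tail_call(skb, &jmp_table, 7);
        return 1;   /* reached only if the tail call fails */
    }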
diff --git a/tools/testing/selftests/bpf/verifier/map_ret_val.c b/tools/testing/selftests/bpf/verifier/map_ret_val.c
new file mode 100644
index 000000000000..bdd0e8d18333
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/map_ret_val.c
@@ -0,0 +1,65 @@
+{
+ "invalid map_fd for function call",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "fd 0 is not pointing to valid bpf_map",
+ .result = REJECT,
+},
+{
+ "don't check return value before access",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 invalid mem access 'map_value_or_null'",
+ .result = REJECT,
+},
+{
+ "access memory with incorrect alignment",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "misaligned value access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "sometimes access memory with incorrect alignment",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 invalid mem access",
+ .errstr_unpriv = "R0 leaks addr",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/masking.c b/tools/testing/selftests/bpf/verifier/masking.c
new file mode 100644
index 000000000000..6e1358c544fd
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/masking.c
@@ -0,0 +1,322 @@
+{
+ "masking, test out of bounds 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 5),
+ BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 3",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 4",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 5",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 6",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 8",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 9",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 10",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 11",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 12",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test in bounds 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 4),
+ BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 4,
+},
+{
+ "masking, test in bounds 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test in bounds 3",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0xfffffffe),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0xfffffffe,
+},
+{
+ "masking, test in bounds 4",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0xabcde),
+ BPF_MOV32_IMM(BPF_REG_2, 0xabcdef - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0xabcde,
+},
+{
+ "masking, test in bounds 5",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test in bounds 6",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 46),
+ BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 46,
+},
+{
+ "masking, test in bounds 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, -46),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 46,
+},
+{
+ "masking, test in bounds 8",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, -47),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
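
Every case above runs the same five-instruction tail, the branchless bounds clamp the verifier also emits as a Spectre-v1 style mitigation: an index in [0, limit) passes through unchanged (the "in bounds" retvals), while anything outside collapses to zero (the "out of bounds" retvals), with no data-dependent branch. A direct C model, valid for limits below 2^63:

    #include <stdint.h>

    static uint64_t mask_index(uint64_t index, uint64_t limit)
    {
        uint64_t mask = limit - 1;

        mask -= index;              /* BPF_SUB */
        mask |= index;              /* BPF_OR: sign bit set iff index
                                     * is outside [0, limit) */
        mask = -mask;               /* BPF_NEG */
        mask = (int64_t)mask >> 63; /* BPF_ARSH: all ones or all zeros */
        return index & mask;        /* BPF_AND */
    }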
diff --git a/tools/testing/selftests/bpf/verifier/meta_access.c b/tools/testing/selftests/bpf/verifier/meta_access.c
new file mode 100644
index 000000000000..205292b8dd65
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/meta_access.c
@@ -0,0 +1,235 @@
+{
+ "meta access, test1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet, off=-8",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test5",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
+ BPF_MOV64_IMM(BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_meta),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R3 !read_ok",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test7",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test8",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test9",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test10",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_IMM(BPF_REG_5, 42),
+ BPF_MOV64_IMM(BPF_REG_6, 24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test11",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_IMM(BPF_REG_5, 42),
+ BPF_MOV64_IMM(BPF_REG_6, 24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test12",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
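
The metadata area sits directly in front of the packet, so the access pattern mirrors direct packet access with data taking the role of data_end: a data_meta pointer may be dereferenced only after proving meta + N <= data (test1, test7), and checking it against data_end instead proves nothing (test3, test4). Also visible above: bpf_xdp_adjust_meta() invalidates previously derived packet pointers (test5). Sketch in C:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int meta_example(struct xdp_md *ctx)
    {
        void *meta = (void *)(long)ctx->data_meta;
        void *data = (void *)(long)ctx->data;

        if (meta + 8 > data)    /* bound meta against data, not data_end */
            return XDP_PASS;
        return ((__u8 *)meta)[0] ? XDP_DROP : XDP_PASS;
    }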
diff --git a/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c b/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c
new file mode 100644
index 000000000000..471c1a5950d8
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c
@@ -0,0 +1,59 @@
+{
+ "check bpf_perf_event_data->sample_period byte load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period) + 7),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "check bpf_perf_event_data->sample_period half load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period) + 6),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "check bpf_perf_event_data->sample_period word load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period) + 4),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "check bpf_perf_event_data->sample_period dword load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
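
sample_period is a 64-bit member of struct bpf_perf_event_data, and these cases pin down that narrower byte/half/word loads of it are also permitted, with the offset shifted on big-endian targets so the same low-order bytes are read. From C one normally just loads the whole field (a sketch):

    #include <linux/bpf_perf_event.h>
    #include <bpf/bpf_helpers.h>

    SEC("perf_event")
    int count_big_periods(struct bpf_perf_event_data *ctx)
    {
        /* compiles to a single BPF_LDX | BPF_DW of sample_period */
        return ctx->sample_period > 1000000;
    }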
diff --git a/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c
new file mode 100644
index 000000000000..bbdba990fefb
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c
@@ -0,0 +1,74 @@
+{
+ "prevent map lookup in sockmap",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_sockmap = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_SOCK_OPS,
+},
+{
+ "prevent map lookup in sockhash",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_sockhash = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_SOCK_OPS,
+},
+{
+ "prevent map lookup in xskmap",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_xskmap = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "prevent map lookup in stack trace",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_stacktrace = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "prevent map lookup in prog array",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog2 = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
+},
diff --git a/tools/testing/selftests/bpf/verifier/raw_stack.c b/tools/testing/selftests/bpf/verifier/raw_stack.c
new file mode 100644
index 000000000000..193d9e87d5a9
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/raw_stack.c
@@ -0,0 +1,305 @@
+{
+ "raw_stack: no skb_load_bytes",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ /* Call to skb_load_bytes() omitted. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid read from stack off -8+0 size 8",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, negative len",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, negative len 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, ~0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, zero len",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, no init",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, init",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, spilled regs around bounds",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+ offsetof(struct __sk_buff, priority)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, spilled regs corruption",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "raw_stack: skb_load_bytes, spilled regs corruption 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+ offsetof(struct __sk_buff, priority)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R3 invalid mem access 'inv'",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "raw_stack: skb_load_bytes, spilled regs + data",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+ offsetof(struct __sk_buff, priority)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3 off=-513 access_size=8",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3 off=-1 access_size=8",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3 off=-512 access_size=0",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, large access",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 512),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c
new file mode 100644
index 000000000000..dc2cc823df2b
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c
@@ -0,0 +1,607 @@
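+/* Socket reference tracking tests. BPF_SK_LOOKUP is a test-harness macro
+ * that emits a full bpf_sk_lookup_tcp() call sequence; the reference it
+ * returns must be released with bpf_sk_release() on every path before exit.
+ */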
+{
+ "reference tracking: leak potential reference",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: leak potential reference on stack",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: leak potential reference on stack 2",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: zero potential reference",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: copy and zero potential references",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
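+/* The lookup returns PTR_TO_SOCKET_OR_NULL; bpf_sk_release() requires a
+ * plain sock, so the pointer must be NULL-checked before it is released.
+ */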
+{
+ "reference tracking: release reference without check",
+ .insns = {
+ BPF_SK_LOOKUP,
+ /* reference in r0 may be NULL */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=sock_or_null expected=sock",
+ .result = REJECT,
+},
+{
+ "reference tracking: release reference",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: release reference 2",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
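+/* After bpf_sk_release() the pointer is invalidated (marked 'inv'), so
+ * releasing the same reference again through a saved copy is rejected.
+ */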
+{
+ "reference tracking: release reference twice",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=inv expected=sock",
+ .result = REJECT,
+},
+{
+ "reference tracking: release reference twice inside branch",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=inv expected=sock",
+ .result = REJECT,
+},
+{
+ "reference tracking: alloc, check, free in one subbranch",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
+ /* if (data + 16 > data_end) exit; */
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
+ offsetof(struct __sk_buff, mark)),
+ BPF_SK_LOOKUP,
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
+ /* Leak reference in R0 */
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "reference tracking: alloc, check, free in both subbranches",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
+ /* if (data + 16 > data_end) exit; */
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
+ offsetof(struct __sk_buff, mark)),
+ BPF_SK_LOOKUP,
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
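+/* References are tracked across BPF-to-BPF calls: a subprog may release a
+ * reference acquired by its caller, but releasing it both inside and
+ * outside the call, or stashing an unchecked pointer in the caller's stack
+ * without releasing it, is still caught.
+ */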
+{
+ "reference tracking in call: free reference in subprog",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking in call: free reference in subprog and outside",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=inv expected=sock",
+ .result = REJECT,
+},
+{
+ "reference tracking in call: alloc & leak reference in subprog",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
+ BPF_SK_LOOKUP,
+ /* spill unchecked sk_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking in call: alloc in subprog, release outside",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_SK_LOOKUP,
+ BPF_EXIT_INSN(), /* return sk */
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = POINTER_VALUE,
+ .result = ACCEPT,
+},
+{
+ "reference tracking in call: sk_ptr leak into caller stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ /* spill unchecked sk_ptr into stack of caller */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_SK_LOOKUP,
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking in call: sk_ptr spill into caller stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ /* spill unchecked sk_ptr into stack of caller */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ /* now the sk_ptr is verified, free the reference */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_SK_LOOKUP,
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
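+/* BPF_LD_ABS/BPF_LD_IND may exit the program implicitly on failure, which
+ * would leak a held reference; they are only allowed once all references
+ * have been released.
+ */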
+{
+ "reference tracking: allow LD_ABS",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: forbid LD_ABS while holding reference",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
+ .result = REJECT,
+},
+{
+ "reference tracking: allow LD_IND",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_7, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "reference tracking: forbid LD_IND while holding reference",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_7, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
+ .result = REJECT,
+},
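+/* A tail call does not return to the caller, so any reference still held at
+ * the call site would leak; the reference must be released first, or the
+ * lookup must have returned NULL on the path taking the tail call.
+ */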
+{
+ "reference tracking: check reference or tail call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ /* if (sk) bpf_sk_release() */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
+ /* bpf_tail_call() */
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 17 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: release reference then tail call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ /* if (sk) bpf_sk_release() */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ /* bpf_tail_call() */
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 18 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: leak possible reference over tail call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ /* Look up socket and store in REG_6 */
+ BPF_SK_LOOKUP,
+ /* bpf_tail_call() */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ /* if (sk) bpf_sk_release() */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 16 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "tail_call would lead to reference leak",
+ .result = REJECT,
+},
+{
+ "reference tracking: leak checked reference over tail call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ /* Look up socket and store in REG_6 */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ /* if (!sk) goto end */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ /* bpf_tail_call() */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 17 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "tail_call would lead to reference leak",
+ .result = REJECT,
+},
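+/* Pointer arithmetic on socket pointers is forbidden in both the unchecked
+ * (sock_or_null) and the NULL-checked (sock) state.
+ */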
+{
+ "reference tracking: mangle and release sock_or_null",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R1 pointer arithmetic on sock_or_null prohibited",
+ .result = REJECT,
+},
+{
+ "reference tracking: mangle and release sock",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R1 pointer arithmetic on sock prohibited",
+ .result = REJECT,
+},
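+/* Field access on a held socket: reads of the exposed struct bpf_sock
+ * fields are allowed at their declared size, writes are refused, and any
+ * access after bpf_sk_release() fails with !read_ok.
+ */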
+{
+ "reference tracking: access member",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: write to member",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LD_IMM64(BPF_REG_2, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
+ offsetof(struct bpf_sock, mark)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "cannot write into socket",
+ .result = REJECT,
+},
+{
+ "reference tracking: invalid 64-bit access of member",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid bpf_sock access off=0 size=8",
+ .result = REJECT,
+},
+{
+ "reference tracking: access after release",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "reference tracking: direct access for lookup",
+ .insns = {
+ /* Check that the packet is at least 64B long */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
+ /* sk = sk_lookup_tcp(ctx, skb->data, ...) */
+ BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/runtime_jit.c b/tools/testing/selftests/bpf/verifier/runtime_jit.c
new file mode 100644
index 000000000000..a9a8f620e71c
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/runtime_jit.c
@@ -0,0 +1,80 @@
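+/* These tests actually execute the program: an in-bounds index dispatches
+ * through the harness-provided prog array (fixup_prog1), whose target
+ * programs determine the 41/42 return values; an empty slot, an
+ * out-of-bounds index or a negative index makes bpf_tail_call() fall
+ * through to the following instruction.
+ */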
+{
+ "runtime/jit: tail_call within bounds, prog once",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "runtime/jit: tail_call within bounds, prog loop",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 41,
+},
+{
+ "runtime/jit: tail_call within bounds, no prog",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "runtime/jit: tail_call out of bounds",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 256),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "runtime/jit: pass negative index to tail_call",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, -1),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "runtime/jit: pass > 32bit index to tail_call",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 2 },
+ .result = ACCEPT,
+ .retval = 42,
+ /* Verifier rewrite for unpriv skips tail call here. */
+ .retval_unpriv = 2,
+},
diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c
new file mode 100644
index 000000000000..7e50cb80873a
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/search_pruning.c
@@ -0,0 +1,156 @@
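+/* State-pruning tests: a state where a register holds a pointer must never
+ * be treated as equivalent to one where it holds a scalar, and every branch
+ * has to be walked even when the paths look superficially identical.
+ */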
+{
+ "pointer/scalar confusion in state equality check (way 1)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_A(1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr as return value"
+},
+{
+ "pointer/scalar confusion in state equality check (way 2)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_JMP_A(1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr as return value"
+},
+{
+ "liveness pruning and write screening",
+ .insns = {
+ /* Get an unknown value */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* branch conditions teach us nothing about R2 */
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "varlen_map_value_access pruning",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .errstr = "R0 unbounded memory access",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "search pruning: all branches should be verified (nop operation)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_A(1),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_6, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R6 invalid mem access 'inv'",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "search pruning: all branches should be verified (invalid stack access)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+ BPF_JMP_A(1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "invalid read from stack off -16+0 size 8",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "allocated_stack",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8),
+ BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9),
+ BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = ACCEPT,
+ .insn_processed = 15,
+},
diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c
new file mode 100644
index 000000000000..d58db72fdfe8
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/spill_fill.c
@@ -0,0 +1,75 @@
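+/* Spill/fill tests: an aligned 8-byte spill preserves the register type
+ * across the fill; overwriting any byte of the slot corrupts the spill, so
+ * unprivileged programs are rejected and privileged ones read back only an
+ * unknown scalar.
+ */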
+{
+ "check valid spill/fill",
+ .insns = {
+ /* spill R1(ctx) into stack */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ /* fill it back into R2 */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
+ /* should be able to access R0 = *(R2 + 8) */
+ /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .retval = POINTER_VALUE,
+},
+{
+ "check valid spill/fill, skb mark",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = ACCEPT,
+},
+{
+ "check corrupted spill/fill",
+ .insns = {
+ /* spill R1(ctx) into stack */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ /* corrupt the spilled R1 pointer on the stack */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
+ /* Filling it back into R0 is fine for privileged;
+ * R0 simply becomes a SCALAR_VALUE.
+ */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ /* Load from R0 should fail. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .errstr = "R0 invalid mem access 'inv",
+ .result = REJECT,
+},
+{
+ "check corrupted spill/fill, LSB",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+},
+{
+ "check corrupted spill/fill, MSB",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+},
diff --git a/tools/testing/selftests/bpf/verifier/stack_ptr.c b/tools/testing/selftests/bpf/verifier/stack_ptr.c
new file mode 100644
index 000000000000..7276620ef242
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/stack_ptr.c
@@ -0,0 +1,317 @@
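+/* PTR_TO_STACK bounds and alignment: the accessible window is
+ * [fp - 512, fp), the misaligned multi-byte accesses below are rejected,
+ * and accumulating frame-pointer offsets near the 2^29 internal limit is
+ * refused outright.
+ */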
+{
+ "PTR_TO_STACK store/load",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0xfaceb00c,
+},
+{
+ "PTR_TO_STACK store/load - bad alignment on off",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
+},
+{
+ "PTR_TO_STACK store/load - bad alignment on reg",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
+},
+{
+ "PTR_TO_STACK store/load - out of bounds low",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off=-79992 size=8",
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+},
+{
+ "PTR_TO_STACK store/load - out of bounds high",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off=0 size=8",
+},
+{
+ "PTR_TO_STACK check high 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check high 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check high 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check high 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .errstr = "invalid stack off=0 size=1",
+ .result = REJECT,
+},
+{
+ "PTR_TO_STACK check high 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off",
+},
+{
+ "PTR_TO_STACK check high 6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off",
+},
+{
+ "PTR_TO_STACK check high 7",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .errstr = "fp pointer offset",
+},
+{
+ "PTR_TO_STACK check low 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -512),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check low 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 1, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check low 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .errstr = "invalid stack off=-513 size=1",
+ .result = REJECT,
+},
+{
+ "PTR_TO_STACK check low 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, INT_MIN),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "math between fp pointer",
+},
+{
+ "PTR_TO_STACK check low 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off",
+},
+{
+ "PTR_TO_STACK check low 6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off",
+},
+{
+ "PTR_TO_STACK check low 7",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .errstr = "fp pointer offset",
+},
+{
+ "PTR_TO_STACK mixed reg/k, 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
+ BPF_MOV64_IMM(BPF_REG_2, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK mixed reg/k, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
+ BPF_MOV64_IMM(BPF_REG_2, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_5, -6),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK mixed reg/k, 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
+ BPF_MOV64_IMM(BPF_REG_2, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = -3,
+},
+{
+ "PTR_TO_STACK reg",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_MOV64_IMM(BPF_REG_2, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "invalid stack off=0 size=1",
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "stack pointer arithmetic",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
+ BPF_ST_MEM(0, BPF_REG_2, 4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_ST_MEM(0, BPF_REG_2, 4, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/uninit.c b/tools/testing/selftests/bpf/verifier/uninit.c
new file mode 100644
index 000000000000..987a5871ff1d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/uninit.c
@@ -0,0 +1,39 @@
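+/* Reads of registers that were never written fail with !read_ok, including
+ * the implicit read of R0 at exit; register numbers outside R0-R10 are
+ * rejected outright.
+ */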
+{
+ "read uninitialized register",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 !read_ok",
+ .result = REJECT,
+},
+{
+ "read invalid register",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R15 is invalid",
+ .result = REJECT,
+},
+{
+ "program doesn't init R0 before exit",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
+},
+{
+ "program doesn't init R0 before exit in all branches",
+ .insns = {
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 !read_ok",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c
new file mode 100644
index 000000000000..dca58cf1a4ab
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/unpriv.c
@@ -0,0 +1,521 @@
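+/* Unprivileged restrictions: pointers must not be leaked (returned, stored
+ * where user space could read them, or passed to helpers that would expose
+ * them), compared, or manipulated beyond simple bounds-checked arithmetic;
+ * most of these programs still load fine for privileged users.
+ */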
+{
+ "unpriv: return pointer",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr",
+ .retval = POINTER_VALUE,
+},
+{
+ "unpriv: add const to pointer",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "unpriv: add pointer to pointer",
+ .insns = {
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R1 pointer += pointer",
+},
+{
+ "unpriv: neg pointer",
+ .insns = {
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 pointer arithmetic",
+},
+{
+ "unpriv: cmp pointer with const",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 pointer comparison",
+},
+{
+ "unpriv: cmp pointer with pointer",
+ .insns = {
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R10 pointer comparison",
+},
+{
+ "unpriv: check that printk is disallowed",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "unknown func bpf_trace_printk#6",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: pass pointer to helper function",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr_unpriv = "R4 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: indirectly pass pointer on stack to helper function",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "invalid indirect read from stack off -8+0 size 8",
+ .result = REJECT,
+},
+{
+ "unpriv: mangle pointer on stack 1",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: mangle pointer on stack 2",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: read pointer from stack in small chunks",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid size",
+ .result = REJECT,
+},
+{
+ "unpriv: write pointer into ctx",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 leaks addr",
+ .result_unpriv = REJECT,
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "unpriv: spill/fill of ctx",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "unpriv: spill/fill of ctx 2",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of ctx 3",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R1 type=fp expected=ctx",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of ctx 4",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10, BPF_REG_0, -8, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R1 type=inv expected=ctx",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 42),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct __sk_buff, mark)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "same insn cannot be used with different pointers",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx - ctx and sock",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sk_lookup_tcp(...); */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb == NULL) *target = sock; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* else *target = skb; */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* struct __sk_buff *skb = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* skb->mark = 42; */
+ BPF_MOV64_IMM(BPF_REG_3, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct __sk_buff, mark)),
+ /* if (sk) bpf_sk_release(sk) */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "type=ctx expected=sock",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx - leak sock",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sk_lookup_tcp(...); */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb == NULL) *target = sock; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* else *target = skb; */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* struct __sk_buff *skb = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* skb->mark = 42; */
+ BPF_MOV64_IMM(BPF_REG_3, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ //.errstr = "same insn cannot be used with different pointers",
+ .errstr = "Unreleased reference",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx - sock and ctx (read)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sk_lookup_tcp(...); */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb) *target = skb */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* else *target = sock */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* struct bpf_sock *sk = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct bpf_sock, mark)),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "same insn cannot be used with different pointers",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx - sock and ctx (write)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sk_lookup_tcp(...); */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb) *target = skb */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* else *target = sock */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* struct bpf_sock *sk = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* if (sk) sk->mark = 42; bpf_sk_release(sk); */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct bpf_sock, mark)),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ //.errstr = "same insn cannot be used with different pointers",
+ .errstr = "cannot write into socket",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers ldx",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
+ -(__s32)offsetof(struct bpf_perf_event_data,
+ sample_period) - 8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "same insn cannot be used with different pointers",
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "unpriv: write pointer into map elem value",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "alu32: mov u32 const",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_7, 0),
+ BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "unpriv: partial copy of pointer",
+ .insns = {
+ BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R10 partial copy",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: pass pointer to tail_call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .errstr_unpriv = "R3 leaks addr into helper",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: cmp map pointer with zero",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: write into frame pointer",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "frame pointer is read only",
+ .result = REJECT,
+},
+{
+ "unpriv: spill/fill frame pointer",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "frame pointer is read only",
+ .result = REJECT,
+},
+{
+ "unpriv: cmp of frame pointer",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R10 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: adding of fp",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: cmp of stack pointer",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R2 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/value.c b/tools/testing/selftests/bpf/verifier/value.c
new file mode 100644
index 000000000000..0e42592b1218
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value.c
@@ -0,0 +1,104 @@
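+/* Map value access tests; the unaligned variants set
+ * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS and are only exercised on
+ * architectures with efficient unaligned access.
+ */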
+{
+ "map element value store of cleared call register",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R1 !read_ok",
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+},
+{
+ "map element value with unaligned store",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
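+/* Tests flagged F_NEEDS_EFFICIENT_UNALIGNED_ACCESS only run on
+ * architectures with CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS;
+ * strict-alignment targets would reject the unaligned accesses
+ * outright, changing the expected verdict.
+ */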
+{
+ "map element value with unaligned load",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map element value is preserved across register spilling",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, offsetof(struct test_val, foo)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/value_adj_spill.c b/tools/testing/selftests/bpf/verifier/value_adj_spill.c
new file mode 100644
index 000000000000..7135e8021b81
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value_adj_spill.c
@@ -0,0 +1,43 @@
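+/* Spill/fill of map value pointers: the pointer type must be
+ * preserved when the lookup result is spilled to the stack and
+ * filled back, including the or-null state before the NULL check.
+ */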
+{
+ "map element value is preserved across register spilling",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+},
+{
+ "map element value or null is marked on register spilling",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
new file mode 100644
index 000000000000..7f6c232cd842
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
@@ -0,0 +1,94 @@
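+/* ALU ops that are never legal on a map value pointer: bitwise AND,
+ * 32-bit arithmetic, division and byte swaps are rejected outright
+ * or degrade the pointer to a scalar whose later dereference fails;
+ * the same holds for an xadd applied to a spilled pointer.
+ */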
+{
+ "map element value illegal alu op, 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 bitwise operator &= on pointer",
+ .result = REJECT,
+},
+{
+ "map element value illegal alu op, 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 32-bit pointer arithmetic prohibited",
+ .result = REJECT,
+},
+{
+ "map element value illegal alu op, 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 pointer arithmetic with /= operator",
+ .result = REJECT,
+},
+{
+ "map element value illegal alu op, 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "invalid mem access 'inv'",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map element value illegal alu op, 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_IMM(BPF_REG_3, 4096),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 invalid mem access 'inv'",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/value_or_null.c b/tools/testing/selftests/bpf/verifier/value_or_null.c
new file mode 100644
index 000000000000..860d4a71cd83
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value_or_null.c
@@ -0,0 +1,152 @@
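+/* PTR_TO_MAP_VALUE_OR_NULL handling: a NULL check on one copy of
+ * the lookup result must propagate to every copy, and no arithmetic
+ * is allowed on the result before the check.
+ */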
+{
+ "multiple registers share map_lookup_elem result",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
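+/* The next three tests do ALU ops on the lookup result before the
+ * NULL check; any arithmetic on ptr_to_map_value_or_null is
+ * rejected, even when it nets out to a no-op such as -2 then +2 or
+ * AND with -1.
+ */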
+{
+ "alu ops on ptr_to_map_value_or_null, 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "R4 pointer arithmetic on map_value_or_null",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "alu ops on ptr_to_map_value_or_null, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "R4 pointer arithmetic on map_value_or_null",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "alu ops on ptr_to_map_value_or_null, 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "R4 pointer arithmetic on map_value_or_null",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "invalid memory access with multiple map_lookup_elem calls",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .result = REJECT,
+ .errstr = "R4 !read_ok",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "valid indirect map_lookup_elem access with 2nd lookup in branch",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_2, 10),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "invalid map access from else condition",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 unbounded memory access",
+ .result = REJECT,
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
new file mode 100644
index 000000000000..9ab5ace83e02
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
@@ -0,0 +1,792 @@
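+/* Pointer arithmetic on map value pointers: known and unknown
+ * scalars added to or subtracted from value_ptr in both operand
+ * orders, out-of-range offsets, and the unprivileged restrictions
+ * on mixing pointers from different maps or program paths.
+ */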
+{
+ "map access: known scalar += value_ptr from different maps",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 tried to add from different maps",
+ .retval = 1,
+},
+{
+ "map access: value_ptr -= known scalar from different maps",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 min value is outside of the array range",
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr from different maps, but same value properties",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: mixing value pointer and scalar, 1",
+ .insns = {
+ // load map value pointer into r0 and r2
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ // load some number from the map into r1
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3),
+ // branch A
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_JMP_A(2),
+ // branch B
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+ // common instruction
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ // branch A
+ BPF_JMP_A(4),
+ // branch B
+ BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
+ // verifier follows fall-through
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ // fake-dead code; targeted from branch A to
+ // prevent dead code sanitization
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R2 tried to add from different pointers or scalars",
+ .retval = 0,
+},
+{
+ "map access: mixing value pointer and scalar, 2",
+ .insns = {
+ // load map value pointer into r0 and r2
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ // load some number from the map into r1
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ // branch A
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+ BPF_JMP_A(2),
+ // branch B
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ // common instruction
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ // branch A
+ BPF_JMP_A(4),
+ // branch B
+ BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
+ // verifier follows fall-through
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ // fake-dead code; targeted from branch A to
+ // prevent dead code sanitization
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R2 tried to add from different maps or paths",
+ .retval = 0,
+},
+{
+ "sanitation: alu with different scalars",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+ BPF_JMP_A(2),
+ BPF_MOV64_IMM(BPF_REG_2, 42),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100001),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .retval = 0x100000,
+},
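+/* Both paths above reach the shared BPF_ADD with r2 and r3 holding
+ * different scalar constants; ALU sanitation must not reject this,
+ * since no pointers are involved.  The expected retval 0x100000
+ * corresponds to the r2 = 0, r3 = 0x100000 path running.
+ */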
+{
+ "map access: value_ptr += known scalar, upper oob arith, test 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 48),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, upper oob arith, test 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 49),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, upper oob arith, test 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr -= known scalar, lower oob arith, test 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 48),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 min value is outside of the array range",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+},
+{
+ "map access: value_ptr -= known scalar, lower oob arith, test 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 48),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr -= known scalar, lower oob arith, test 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 49),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "invalid access to map value",
+},
+{
+ "map access: value_ptr += known scalar, 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "invalid access to map value",
+},
+{
+ "map access: value_ptr += known scalar, 4",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, -2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, 5",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, (6 + 1) * sizeof(int)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 0xabcdef12,
+},
+{
+ "map access: value_ptr += known scalar, 6",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_1, (3 + 1) * sizeof(int)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 3 * sizeof(int)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 0xabcdef12,
+},
+{
+ "map access: unknown scalar += value_ptr, 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
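+/* The AND with a small mask is what makes the unknown scalar safe
+ * here: r1 &= 0xf bounds the offset to [0, 15], well inside the
+ * 48-byte value, so the verifier can prove the access in bounds.
+ */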
+{
+ "map access: unknown scalar += value_ptr, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 0xabcdef12,
+},
+{
+ "map access: unknown scalar += value_ptr, 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 0xabcdef12,
+},
+{
+ "map access: unknown scalar += value_ptr, 4",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_1, 19),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 max value is outside of the array range",
+ .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range",
+},
+{
+ "map access: value_ptr += unknown scalar, 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: value_ptr += unknown scalar, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 0xabcdef12,
+},
+{
+ "map access: value_ptr += unknown scalar, 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 16),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 1),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: value_ptr += value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 pointer += pointer prohibited",
+},
+{
+ "map access: known scalar -= value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 tried to subtract pointer from scalar",
+},
+{
+ "map access: value_ptr -= known scalar",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 min value is outside of the array range",
+},
+{
+ "map access: value_ptr -= known scalar, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: unknown scalar -= value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 tried to subtract pointer from scalar",
+},
+{
+ "map access: value_ptr -= unknown scalar",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 min value is negative",
+},
+{
+ "map access: value_ptr -= unknown scalar, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
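+/* ptr - ptr is special: in privileged mode the subtraction yields
+ * an unknown scalar, so the dereference below fails with "invalid
+ * mem access 'inv'", while unprivileged mode refuses the
+ * subtraction itself.
+ */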
+{
+ "map access: value_ptr -= value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+ .errstr_unpriv = "R0 pointer -= pointer prohibited",
+},
diff --git a/tools/testing/selftests/bpf/verifier/var_off.c b/tools/testing/selftests/bpf/verifier/var_off.c
new file mode 100644
index 000000000000..1e536ff121a5
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/var_off.c
@@ -0,0 +1,66 @@
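+/* Variable-offset accesses: ctx and stack accesses whose offset is
+ * not known at verification time are rejected, whether the access
+ * is a direct load or an indirect one through a helper argument.
+ */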
+{
+ "variable-offset ctx access",
+ .insns = {
+ /* Get an unknown value */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* Make it small and 4-byte aligned */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+	/* Add it to the skb pointer.  We now have either &skb->len or
+	 * &skb->pkt_type, but we don't know which.
+	 */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ /* dereference it */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "variable ctx access var_off=(0x0; 0x4)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "variable-offset stack access",
+ .insns = {
+ /* Fill the top 8 bytes of the stack */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ /* Get an unknown value */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* Make it small and 4-byte aligned */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
+	/* Add it to the frame pointer.  We now have either fp-4 or fp-8,
+	 * but we don't know which.
+	 */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+ /* dereference it */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "indirect variable-offset stack access",
+ .insns = {
+ /* Fill the top 8 bytes of the stack */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ /* Get an unknown value */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* Make it small and 4-byte aligned */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
+	/* Add it to the frame pointer.  We now have either fp-4 or fp-8,
+	 * but we don't know which.
+	 */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+ /* dereference it indirectly */
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 5 },
+ .errstr = "variable stack read R2",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
diff --git a/tools/testing/selftests/bpf/verifier/xadd.c b/tools/testing/selftests/bpf/verifier/xadd.c
new file mode 100644
index 000000000000..c5de2e62cc8b
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/xadd.c
@@ -0,0 +1,97 @@
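+/* BPF_XADD: misaligned xadd on stack and map value memory is
+ * rejected, xadd into packet memory is not allowed at all, and the
+ * instruction must leave its source and destination registers
+ * unmodified.
+ */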
+{
+ "xadd/w check unaligned stack",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "misaligned stack access off",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "xadd/w check unaligned map",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = REJECT,
+ .errstr = "misaligned value access off",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "xadd/w check unaligned pkt",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 99),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
+ BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
+ BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "BPF_XADD stores into R2 pkt is not allowed",
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "xadd/w check whether src/dst got mangled, 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 3,
+},
+{
+ "xadd/w check whether src/dst got mangled, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 3,
+},
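+/* retval 3 in both mangling tests: the stack slot starts at 1 and
+ * two atomic adds of r0 == 1 bring it to 3; the r6/r7 comparisons
+ * bail out with 42 if xadd modified either register.
+ */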
diff --git a/tools/testing/selftests/bpf/verifier/xdp.c b/tools/testing/selftests/bpf/verifier/xdp.c
new file mode 100644
index 000000000000..5ac390508139
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/xdp.c
@@ -0,0 +1,14 @@
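+/* Basic XDP context access: read ingress_ifindex from struct
+ * xdp_md and branch on it.
+ */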
+{
+ "XDP, using ifindex from netdev",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, ingress_ifindex)),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .retval = 1,
+},
diff --git a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
new file mode 100644
index 000000000000..bfb97383e6b5
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
@@ -0,0 +1,900 @@
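+/* XDP direct packet access: JGT/JLT/JGE/JLE bounds checks between
+ * pkt_data and pkt_end in both operand orders, with good and bad
+ * accesses on the taken and fall-through paths, and rejection of
+ * arithmetic on pkt_end itself.
+ */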
+{
+ "XDP pkt read, pkt_end mangling, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 pointer arithmetic on pkt_end",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "XDP pkt read, pkt_end mangling, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 pointer arithmetic on pkt_end",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "XDP pkt read, pkt_data' > pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' > pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' > pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end > pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end > pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end > pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' < pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' < pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' < pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end < pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end < pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end < pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' >= pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end >= pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end >= pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end >= pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' <= pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end <= pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end <= pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end <= pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' > pkt_data, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data > pkt_meta', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data > pkt_meta', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data > pkt_meta', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' < pkt_data, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data < pkt_meta', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data < pkt_meta', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data < pkt_meta', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' >= pkt_data, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data >= pkt_meta', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' <= pkt_data, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data <= pkt_meta', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto.sh
new file mode 100755
index 000000000000..749ba3cfda1d
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto.sh
@@ -0,0 +1,126 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test vetoing of FDB entries that mlxsw cannot offload. This exercises several
+# different veto vectors to test various rollback scenarios in the vxlan driver.
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ fdb_create_veto_test
+ fdb_replace_veto_test
+ fdb_append_veto_test
+ fdb_changelink_veto_test
+"
+NUM_NETIFS=2
+source $lib_dir/lib.sh
+
+setup_prepare()
+{
+ swp1=${NETIFS[p1]}
+ swp2=${NETIFS[p2]}
+
+ ip link add dev br0 type bridge mcast_snooping 0
+
+ ip link set dev $swp1 up
+ ip link set dev $swp1 master br0
+ ip link set dev $swp2 up
+
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+ ip link set dev vxlan0 master br0
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ip link set dev vxlan0 nomaster
+ ip link del dev vxlan0
+
+ ip link set dev $swp2 down
+ ip link set dev $swp1 nomaster
+ ip link set dev $swp1 down
+
+ ip link del dev br0
+}
+
+fdb_create_veto_test()
+{
+ RET=0
+
+ bridge fdb add 01:02:03:04:05:06 dev vxlan0 self static \
+ dst 198.51.100.2 2>/dev/null
+ check_fail $? "multicast MAC not rejected"
+
+ bridge fdb add 01:02:03:04:05:06 dev vxlan0 self static \
+ dst 198.51.100.2 2>&1 >/dev/null | grep -q mlxsw_spectrum
+ check_err $? "multicast MAC rejected without extack"
+
+ log_test "vxlan FDB veto - create"
+}
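+
+# The `2>&1 >/dev/null` ordering in the checks above is deliberate: stderr
+# is first duplicated onto the pipe, and only then is stdout discarded, so
+# grep sees nothing but the extack message. A minimal, hypothetical helper
+# capturing the idiom (a sketch only; the tests below spell it out inline):
+extack_grep()
+{
+ local needle=$1; shift
+
+ "$@" 2>&1 >/dev/null | grep -q "$needle"
+}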
+
+fdb_replace_veto_test()
+{
+ RET=0
+
+ bridge fdb add 00:01:02:03:04:05 dev vxlan0 self static \
+ dst 198.51.100.2
+ check_err $? "valid FDB rejected"
+
+ bridge fdb replace 00:01:02:03:04:05 dev vxlan0 self static \
+ dst 198.51.100.2 port 1234 2>/dev/null
+ check_fail $? "FDB with an explicit port not rejected"
+
+ bridge fdb replace 00:01:02:03:04:05 dev vxlan0 self static \
+ dst 198.51.100.2 port 1234 2>&1 >/dev/null \
+ | grep -q mlxsw_spectrum
+ check_err $? "FDB with an explicit port rejected without extack"
+
+ log_test "vxlan FDB veto - replace"
+}
+
+fdb_append_veto_test()
+{
+ RET=0
+
+ bridge fdb add 00:00:00:00:00:00 dev vxlan0 self static \
+ dst 198.51.100.2
+ check_err $? "valid FDB rejected"
+
+ bridge fdb append 00:00:00:00:00:00 dev vxlan0 self static \
+ dst 198.51.100.3 port 1234 2>/dev/null
+ check_fail $? "FDB with an explicit port not rejected"
+
+ bridge fdb append 00:00:00:00:00:00 dev vxlan0 self static \
+ dst 198.51.100.3 port 1234 2>&1 >/dev/null \
+ | grep -q mlxsw_spectrum
+ check_err $? "FDB with an explicit port rejected without extack"
+
+ log_test "vxlan FDB veto - append"
+}
+
+fdb_changelink_veto_test()
+{
+ RET=0
+
+ ip link set dev vxlan0 type vxlan \
+ group 224.0.0.1 dev lo 2>/dev/null
+ check_fail $? "FDB with a multicast IP not rejected"
+
+ ip link set dev vxlan0 type vxlan \
+ group 224.0.0.1 dev lo 2>&1 >/dev/null \
+ | grep -q mlxsw_spectrum
+ check_err $? "FDB with a multicast IP rejected without extack"
+
+ log_test "vxlan FDB veto - changelink"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh
index 5ab1e5f43022..57cf8914910d 100644
--- a/tools/testing/selftests/net/forwarding/devlink_lib.sh
+++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh
@@ -32,7 +32,7 @@ DEVLINK_VIDDID=$(lspci -s $(echo $DEVLINK_DEV | cut -d"/" -f2) \
##############################################################################
# Sanity checks
-devlink -j resource show "$DEVLINK_DEV" &> /dev/null
+devlink help 2>&1 | grep -q resource
if [ $? -ne 0 ]; then
echo "SKIP: iproute2 too old, missing devlink resource support"
exit 1
diff --git a/tools/testing/selftests/net/forwarding/ipip_flat_gre.sh b/tools/testing/selftests/net/forwarding/ipip_flat_gre.sh
new file mode 100755
index 000000000000..abb694397b86
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_flat_gre.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnel without key.
+# This test uses the flat topology for IP tunneling tests. See ipip_lib.sh
+# for more details.
+
+ALL_TESTS="gre_flat4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_flat_create gre $ol1 $ul1
+ sw2_flat_create gre $ol2 $ul2
+}
+
+gre_flat4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre flat"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_flat_destroy $ol2 $ul2
+ sw1_flat_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_flat_gre_key.sh b/tools/testing/selftests/net/forwarding/ipip_flat_gre_key.sh
new file mode 100755
index 000000000000..c4f373337e48
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_flat_gre_key.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnel with key.
+# This test uses the flat topology for IP tunneling tests. See ipip_lib.sh
+# for more details.
+
+ALL_TESTS="gre_flat4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_flat_create gre $ol1 $ul1 key 233
+ sw2_flat_create gre $ol2 $ul2 key 233
+}
+
+gre_flat4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre flat with key"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_flat_destroy $ol2 $ul2
+ sw1_flat_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_flat_gre_keys.sh b/tools/testing/selftests/net/forwarding/ipip_flat_gre_keys.sh
new file mode 100755
index 000000000000..a811130c0627
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_flat_gre_keys.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnel with ikey/okey.
+# This test uses the flat topology for IP tunneling tests. See ipip_lib.sh
+# for more details.
+
+ALL_TESTS="gre_flat4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_flat_create gre $ol1 $ul1 ikey 111 okey 222
+ sw2_flat_create gre $ol2 $ul2 ikey 222 okey 111
+}
+
+gre_flat4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre flat with ikey/okey"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_flat_destroy $ol2 $ul2
+ sw1_flat_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_hier_gre.sh b/tools/testing/selftests/net/forwarding/ipip_hier_gre.sh
new file mode 100755
index 000000000000..05c5b3cf2f78
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_hier_gre.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnels without key.
+# This test uses the hierarchical topology for IP tunneling tests. See
+# ipip_lib.sh for more details.
+
+ALL_TESTS="gre_hier4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_hierarchical_create gre $ol1 $ul1
+ sw2_hierarchical_create gre $ol2 $ul2
+}
+
+gre_hier4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre hierarchical"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_hierarchical_destroy $ol2 $ul2
+ sw1_hierarchical_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_hier_gre_key.sh b/tools/testing/selftests/net/forwarding/ipip_hier_gre_key.sh
new file mode 100755
index 000000000000..9b105dbca32a
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_hier_gre_key.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnels with key.
+# This test uses the hierarchical topology for IP tunneling tests. See
+# ipip_lib.sh for more details.
+
+ALL_TESTS="gre_hier4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_hierarchical_create gre $ol1 $ul1 key 22
+ sw2_hierarchical_create gre $ol2 $ul2 key 22
+}
+
+gre_hier4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre hierarchical with key"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_hierarchical_destroy $ol2 $ul2
+ sw1_hierarchical_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_hier_gre_keys.sh b/tools/testing/selftests/net/forwarding/ipip_hier_gre_keys.sh
new file mode 100755
index 000000000000..e275d25bd83a
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_hier_gre_keys.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnels with ikey/okey.
+# This test uses the hierarchical topology for IP tunneling tests. See
+# ipip_lib.sh for more details.
+
+ALL_TESTS="gre_hier4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_hierarchical_create gre $ol1 $ul1 ikey 111 okey 222
+ sw2_hierarchical_create gre $ol2 $ul2 ikey 222 okey 111
+}
+
+gre_hier4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre hierarchical with ikey/okey"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_hierarchical_destroy $ol2 $ul2
+ sw1_hierarchical_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_lib.sh b/tools/testing/selftests/net/forwarding/ipip_lib.sh
new file mode 100644
index 000000000000..30f36a57bae6
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_lib.sh
@@ -0,0 +1,349 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Handles creation and destruction of IP-in-IP or GRE tunnels over the given
+# topology. Supports both flat and hierarchical models.
+#
+# Flat model:
+# Overlay and underlay share the same VRF.
+# SW1 uses the default VRF, so its tunnel has no bound dev.
+# SW2 uses a non-default VRF, so its tunnel has a bound dev.
+# +-------------------------+
+# | H1 |
+# | $h1 + |
+# | 192.0.2.1/28 | |
+# +-------------------|-----+
+# |
+# +-------------------|-----+
+# | SW1 | |
+# | $ol1 + |
+# | 192.0.2.2/28 |
+# | |
+# | + g1a (gre) |
+# | loc=192.0.2.65 |
+# | rem=192.0.2.66 --. |
+# | tos=inherit | |
+# | .------------------' |
+# | | |
+# | v |
+# | + $ul1.111 (vlan) |
+# | | 192.0.2.129/28 |
+# | \ |
+# | \_______ |
+# | | |
+# |VRF default + $ul1 |
+# +------------|------------+
+# |
+# +------------|------------+
+# | SW2 + $ul2 |
+# | _______| |
+# | / |
+# | / |
+# | + $ul2.111 (vlan) |
+# | ^ 192.0.2.130/28 |
+# | | |
+# | | |
+# | '------------------. |
+# | + g2a (gre) | |
+# | loc=192.0.2.66 | |
+# | rem=192.0.2.65 --' |
+# | tos=inherit |
+# | |
+# | $ol2 + |
+# | 192.0.2.17/28 | |
+# | VRF v$ol2 | |
+# +-------------------|-----+
+# |
+# +-------------------|-----+
+# | H2 | |
+# | $h2 + |
+# | 192.0.2.18/28 |
+# +-------------------------+
+#
+# Hierarchical model:
+# The tunnel is bound to a device in a different VRF.
+#
+# +---------------------------+
+# | H1 |
+# | $h1 + |
+# | 192.0.2.1/28 | |
+# +-------------------|-------+
+# |
+# +-------------------|-------+
+# | SW1 | |
+# | +-----------------|-----+ |
+# | | $ol1 + | |
+# | | 192.0.2.2/28 | |
+# | | | |
+# | | + g1a (gre) | |
+# | | rem=192.0.2.66 | |
+# | | tos=inherit | |
+# | | loc=192.0.2.65 | |
+# | | ^ | |
+# | | VRF v$ol1 | | |
+# | +-----------|-----------+ |
+# | | |
+# | +-----------|-----------+ |
+# | | VRF v$ul1 | | |
+# | | | | |
+# | | | | |
+# | | v | |
+# | | dummy1 + | |
+# | | 192.0.2.65 | |
+# | | .-------' | |
+# | | | | |
+# | | v | |
+# | | + $ul1.111 (vlan) | |
+# | | | 192.0.2.129/28 | |
+# | | \ | |
+# | | \_____ | |
+# | | | | |
+# | | + $ul1 | |
+# | +----------|------------+ |
+# +------------|--------------+
+# |
+# +------------|--------------+
+# | SW2 | |
+# | +----------|------------+ |
+# | | + $ul2 | |
+# | | _____| | |
+# | | / | |
+# | | / | |
+# | | | $ul2.111 (vlan) | |
+# | | + 192.0.2.130/28 | |
+# | | ^ | |
+# | | | | |
+# | | '-------. | |
+# | | dummy2 + | |
+# | | 192.0.2.66 | |
+# | | ^ | |
+# | | | | |
+# | | | | |
+# | | VRF v$ul2 | | |
+# | +-----------|-----------+ |
+# | | |
+# | +-----------|-----------+ |
+# | | VRF v$ol2 | | |
+# | | | | |
+# | | v | |
+# | | g2a (gre)+ | |
+# | | loc=192.0.2.66 | |
+# | | rem=192.0.2.65 | |
+# | | tos=inherit | |
+# | | | |
+# | | $ol2 + | |
+# | | 192.0.2.17/28 | | |
+# | +-----------------|-----+ |
+# +-------------------|-------+
+# |
+# +-------------------|-------+
+# | H2 | |
+# | $h2 + |
+# | 192.0.2.18/28 |
+# +---------------------------+
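+#
+# A typical consumer of this library (a sketch; the calls mirror the
+# ipip_flat_gre*.sh tests and $ol1/$ul1 are those tests' own variables):
+#
+# source lib.sh
+# source ipip_lib.sh
+# sw1_flat_create gre $ol1 $ul1 # optionally: key 233
+# ...
+# sw1_flat_destroy $ol1 $ul1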
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64
+ ip route add vrf v$h1 192.0.2.16/28 via 192.0.2.2
+}
+
+h1_destroy()
+{
+ ip route del vrf v$h1 192.0.2.16/28 via 192.0.2.2
+ simple_if_fini $h1 192.0.2.1/28 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.18/28
+ ip route add vrf v$h2 192.0.2.0/28 via 192.0.2.17
+}
+
+h2_destroy()
+{
+ ip route del vrf v$h2 192.0.2.0/28 via 192.0.2.17
+ simple_if_fini $h2 192.0.2.18/28
+}
+
+sw1_flat_create()
+{
+ local type=$1; shift
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ ip link set dev $ol1 up
+ __addr_add_del $ol1 add "192.0.2.2/28"
+
+ ip link set dev $ul1 up
+ vlan_create $ul1 111 "" 192.0.2.129/28
+
+ tunnel_create g1a $type 192.0.2.65 192.0.2.66 tos inherit "$@"
+ ip link set dev g1a up
+ __addr_add_del g1a add "192.0.2.65/32"
+
+ ip route add 192.0.2.66/32 via 192.0.2.130
+
+ ip route add 192.0.2.16/28 nexthop dev g1a
+}
+
+sw1_flat_destroy()
+{
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ ip route del 192.0.2.16/28
+
+ ip route del 192.0.2.66/32 via 192.0.2.130
+ __simple_if_fini g1a 192.0.2.65/32
+ tunnel_destroy g1a
+
+ vlan_destroy $ul1 111
+ __simple_if_fini $ul1
+ __simple_if_fini $ol1 192.0.2.2/28
+}
+
+sw2_flat_create()
+{
+ local type=$1; shift
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ simple_if_init $ol2 192.0.2.17/28
+ __simple_if_init $ul2 v$ol2
+ vlan_create $ul2 111 v$ol2 192.0.2.130/28
+
+ tunnel_create g2a $type 192.0.2.66 192.0.2.65 tos inherit dev v$ol2 \
+ "$@"
+ __simple_if_init g2a v$ol2 192.0.2.66/32
+
+ ip route add vrf v$ol2 192.0.2.65/32 via 192.0.2.129
+ ip route add vrf v$ol2 192.0.2.0/28 nexthop dev g2a
+}
+
+sw2_flat_destroy()
+{
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ ip route del vrf v$ol2 192.0.2.0/28
+
+ ip route del vrf v$ol2 192.0.2.65/32 via 192.0.2.129
+ __simple_if_fini g2a 192.0.2.66/32
+ tunnel_destroy g2a
+
+ vlan_destroy $ul2 111
+ __simple_if_fini $ul2
+ simple_if_fini $ol2 192.0.2.17/28
+}
+
+sw1_hierarchical_create()
+{
+ local type=$1; shift
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ simple_if_init $ol1 192.0.2.2/28
+ simple_if_init $ul1
+ ip link add name dummy1 type dummy
+ __simple_if_init dummy1 v$ul1 192.0.2.65/32
+
+ vlan_create $ul1 111 v$ul1 192.0.2.129/28
+ tunnel_create g1a $type 192.0.2.65 192.0.2.66 tos inherit dev dummy1 \
+ "$@"
+ ip link set dev g1a master v$ol1
+
+ ip route add vrf v$ul1 192.0.2.66/32 via 192.0.2.130
+ ip route add vrf v$ol1 192.0.2.16/28 nexthop dev g1a
+}
+
+sw1_hierarchical_destroy()
+{
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ ip route del vrf v$ol1 192.0.2.16/28
+ ip route del vrf v$ul1 192.0.2.66/32
+
+ tunnel_destroy g1a
+ vlan_destroy $ul1 111
+
+ __simple_if_fini dummy1 192.0.2.65/32
+ ip link del dev dummy1
+
+ simple_if_fini $ul1
+ simple_if_fini $ol1 192.0.2.2/28
+}
+
+sw2_hierarchical_create()
+{
+ local type=$1; shift
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ simple_if_init $ol2 192.0.2.17/28
+ simple_if_init $ul2
+
+ ip link add name dummy2 type dummy
+ __simple_if_init dummy2 v$ul2 192.0.2.66/32
+
+ vlan_create $ul2 111 v$ul2 192.0.2.130/28
+ tunnel_create g2a $type 192.0.2.66 192.0.2.65 tos inherit dev dummy2 \
+ "$@"
+ ip link set dev g2a master v$ol2
+
+ ip route add vrf v$ul2 192.0.2.65/32 via 192.0.2.129
+ ip route add vrf v$ol2 192.0.2.0/28 nexthop dev g2a
+}
+
+sw2_hierarchical_destroy()
+{
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ ip route del vrf v$ol2 192.0.2.0/28
+ ip route del vrf v$ul2 192.0.2.65/32
+
+ tunnel_destroy g2a
+ vlan_destroy $ul2 111
+
+ __simple_if_fini dummy2 192.0.2.66/32
+ ip link del dev dummy2
+
+ simple_if_fini $ul2
+ simple_if_fini $ol2 192.0.2.17/28
+}
+
+topo_mtu_change()
+{
+ local mtu=$1
+
+ ip link set mtu $mtu dev $h1
+ ip link set mtu $mtu dev $ol1
+ ip link set mtu $mtu dev g1a
+ ip link set mtu $mtu dev $ul1
+ ip link set mtu $mtu dev $ul1.111
+ ip link set mtu $mtu dev $h2
+ ip link set mtu $mtu dev $ol2
+ ip link set mtu $mtu dev g2a
+ ip link set mtu $mtu dev $ul2
+ ip link set mtu $mtu dev $ul2.111
+}
+
+test_mtu_change()
+{
+ local encap=$1; shift
+
+ RET=0
+
+ ping_do $h1 192.0.2.18 "-s 1800 -w 3"
+ check_fail $? "ping $encap should not pass with size 1800"
+
+ RET=0
+
+ topo_mtu_change 2000
+ ping_do $h1 192.0.2.18 "-s 1800 -w 3"
+ check_err $?
+ log_test "ping $encap packet size 1800 after MTU change"
+}
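+
+# Rough overhead arithmetic behind the 1800/2000 figures in test_mtu_change
+# (a sketch assuming IPv4 GRE without options; the helper below is purely
+# illustrative and is not called by any test):
+gre_overhead_demo()
+{
+ local payload=1800
+ local inner=$((payload + 8 + 20)) # ICMP + inner IPv4 headers
+ local outer=$((inner + 20 + 4)) # outer IPv4 + base GRE header
+
+ # inner=1828, outer=1852: above the default 1500 MTU, below 2000.
+ echo "inner=$inner outer=$outer"
+}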
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 3f248d1f5b91..c1f16bb992dc 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -211,7 +211,7 @@ log_test()
return 1
fi
- printf "TEST: %-60s [PASS]\n" "$test_name $opt_str"
+ printf "TEST: %-60s [ OK ]\n" "$test_name $opt_str"
return 0
}
diff --git a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
new file mode 100755
index 000000000000..a0b5f57d6bd3
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
@@ -0,0 +1,567 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +---------------------------+ +------------------------------+
+# | vrf-h1 | | vrf-h2 |
+# | + $h1 | | + $h2 |
+# | | 10.1.1.101/24 | | | 10.1.2.101/24 |
+# | | default via 10.1.1.1 | | | default via 10.1.2.1 |
+# +----|----------------------+ +----|-------------------------+
+# | |
+# +----|--------------------------------------------|-------------------------+
+# | SW | | |
+# | +--|--------------------------------------------|-----------------------+ |
+# | | + $swp1 br1 + $swp2 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + vx10 + vx20 | |
+# | | local 10.0.0.1 local 10.0.0.1 | |
+# | | remote 10.0.0.2 remote 10.0.0.2 | |
+# | | id 1000 id 2000 | |
+# | | dstport 4789 dstport 4789 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | +-----------------------------------+-----------------------------------+ |
+# | | |
+# | +-----------------------------------|-----------------------------------+ |
+# | | | | |
+# | | +--------------------------------+--------------------------------+ | |
+# | | | | | |
+# | | + vlan10 vlan20 + | |
+# | | | 10.1.1.11/24 10.1.2.11/24 | | |
+# | | | | | |
+# | | + vlan10-v (macvlan) vlan20-v (macvlan) + | |
+# | | 10.1.1.1/24 10.1.2.1/24 | |
+# | | 00:00:5e:00:01:01 00:00:5e:00:01:01 | |
+# | | vrf-green | |
+# | +-----------------------------------------------------------------------+ |
+# | |
+# | + $rp1 +lo |
+# | | 192.0.2.1/24 10.0.0.1/32 |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|--------------------------------------------------------+
+# | | vrf-spine |
+# | + $rp2 |
+# | 192.0.2.2/24 |
+# | | (maybe) HW
+# =============================================================================
+# | | (likely) SW
+# | |
+# | + v1 (veth) |
+# | | 192.0.3.2/24 |
+# +----|--------------------------------------------------------+
+# |
+# +----|----------------------------------------------------------------------+
+# | + v2 (veth) +lo NS1 (netns) |
+# | 192.0.3.1/24 10.0.0.2/32 |
+# | |
+# | +-----------------------------------------------------------------------+ |
+# | | vrf-green | |
+# | | + vlan10-v (macvlan) vlan20-v (macvlan) + | |
+# | | | 10.1.1.1/24 10.1.2.1/24 | | |
+# | | | 00:00:5e:00:01:01 00:00:5e:00:01:01 | | |
+# | | | | | |
+# | | + vlan10 vlan20 + | |
+# | | | 10.1.1.12/24 10.1.2.12/24 | | |
+# | | | | | |
+# | | +--------------------------------+--------------------------------+ | |
+# | | | | |
+# | +-----------------------------------|-----------------------------------+ |
+# | | |
+# | +-----------------------------------+-----------------------------------+ |
+# | | | |
+# | | + vx10 + vx20 | |
+# | | local 10.0.0.2 local 10.0.0.2 | |
+# | | remote 10.0.0.1 remote 10.0.0.1 | |
+# | | id 1000 id 2000 | |
+# | | dstport 4789 dstport 4789 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + w1 (veth) + w3 (veth) | |
+# | | | vid 10 pvid untagged br1 | vid 20 pvid untagged | |
+# | +--|------------------------------------------|-------------------------+ |
+# | | | |
+# | | | |
+# | +--|----------------------+ +--|-------------------------+ |
+# | | | vrf-h1 | | | vrf-h2 | |
+# | | + w2 (veth) | | + w4 (veth) | |
+# | | 10.1.1.102/24 | | 10.1.2.102/24 | |
+# | | default via 10.1.1.1 | | default via 10.1.2.1 | |
+# | +-------------------------+ +----------------------------+ |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ arp_decap
+ arp_suppression
+"
+NUM_NETIFS=6
+source lib.sh
+
+require_command $ARPING
+
+hx_create()
+{
+ local vrf_name=$1; shift
+ local if_name=$1; shift
+ local ip_addr=$1; shift
+ local gw_ip=$1; shift
+
+ vrf_create $vrf_name
+ ip link set dev $if_name master $vrf_name
+ ip link set dev $vrf_name up
+ ip link set dev $if_name up
+
+ ip address add $ip_addr/24 dev $if_name
+ ip neigh replace $gw_ip lladdr 00:00:5e:00:01:01 nud permanent \
+ dev $if_name
+ ip route add default vrf $vrf_name nexthop via $gw_ip
+}
+export -f hx_create
+
+hx_destroy()
+{
+ local vrf_name=$1; shift
+ local if_name=$1; shift
+ local ip_addr=$1; shift
+ local gw_ip=$1; shift
+
+ ip route del default vrf $vrf_name nexthop via $gw_ip
+ ip neigh del $gw_ip dev $if_name
+ ip address del $ip_addr/24 dev $if_name
+
+ ip link set dev $if_name down
+ vrf_destroy $vrf_name
+}
+
+h1_create()
+{
+ hx_create "vrf-h1" $h1 10.1.1.101 10.1.1.1
+}
+
+h1_destroy()
+{
+ hx_destroy "vrf-h1" $h1 10.1.1.101 10.1.1.1
+}
+
+h2_create()
+{
+ hx_create "vrf-h2" $h2 10.1.2.101 10.1.2.1
+}
+
+h2_destroy()
+{
+ hx_destroy "vrf-h2" $h2 10.1.2.101 10.1.2.1
+}
+
+switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1 vlan_default_pvid 0 \
+ mcast_snooping 0
+ # Make sure the bridge uses the MAC address of the local port and not
+ # that of the VxLAN device.
+ ip link set dev br1 address $(mac_get $swp1)
+ ip link set dev br1 up
+
+ ip link set dev $rp1 up
+ ip address add dev $rp1 192.0.2.1/24
+ ip route add 10.0.0.2/32 nexthop via 192.0.2.2
+
+ ip link add name vx10 type vxlan id 1000 \
+ local 10.0.0.1 remote 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx10 up
+
+ ip link set dev vx10 master br1
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ ip link add name vx20 type vxlan id 2000 \
+ local 10.0.0.1 remote 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx20 up
+
+ ip link set dev vx20 master br1
+ bridge vlan add vid 20 dev vx20 pvid untagged
+
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+ bridge vlan add vid 10 dev $swp1 pvid untagged
+
+ ip link set dev $swp2 master br1
+ ip link set dev $swp2 up
+ bridge vlan add vid 20 dev $swp2 pvid untagged
+
+ ip address add 10.0.0.1/32 dev lo
+
+ # Create SVIs
+ vrf_create "vrf-green"
+ ip link set dev vrf-green up
+
+ ip link add link br1 name vlan10 up master vrf-green type vlan id 10
+ ip address add 10.1.1.11/24 dev vlan10
+ ip link add link vlan10 name vlan10-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.1.1/24 dev vlan10-v
+
+ ip link add link br1 name vlan20 up master vrf-green type vlan id 20
+ ip address add 10.1.2.11/24 dev vlan20
+ ip link add link vlan20 name vlan20-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.2.1/24 dev vlan20-v
+
+ bridge vlan add vid 10 dev br1 self
+ bridge vlan add vid 20 dev br1 self
+
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
+}
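+
+# Note: this VTEP and the one emulated in NS1 configure the same gateway
+# MAC (00:00:5e:00:01:01, from the VRRP range) and the same gateway IPs on
+# their macvlan devices, so every host resolves its default gateway
+# locally; this is the usual distributed anycast gateway arrangement for
+# asymmetric VXLAN routing.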
+
+switch_destroy()
+{
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
+
+ bridge vlan del vid 20 dev br1 self
+ bridge vlan del vid 10 dev br1 self
+
+ ip link del dev vlan20
+
+ ip link del dev vlan10
+
+ vrf_destroy "vrf-green"
+
+ ip address del 10.0.0.1/32 dev lo
+
+ bridge vlan del vid 20 dev $swp2
+ ip link set dev $swp2 down
+ ip link set dev $swp2 nomaster
+
+ bridge vlan del vid 10 dev $swp1
+ ip link set dev $swp1 down
+ ip link set dev $swp1 nomaster
+
+ bridge vlan del vid 20 dev vx20
+ ip link set dev vx20 nomaster
+
+ ip link set dev vx20 down
+ ip link del dev vx20
+
+ bridge vlan del vid 10 dev vx10
+ ip link set dev vx10 nomaster
+
+ ip link set dev vx10 down
+ ip link del dev vx10
+
+ ip route del 10.0.0.2/32 nexthop via 192.0.2.2
+ ip address del dev $rp1 192.0.2.1/24
+ ip link set dev $rp1 down
+
+ ip link set dev br1 down
+ ip link del dev br1
+}
+
+spine_create()
+{
+ vrf_create "vrf-spine"
+ ip link set dev $rp2 master vrf-spine
+ ip link set dev v1 master vrf-spine
+ ip link set dev vrf-spine up
+ ip link set dev $rp2 up
+ ip link set dev v1 up
+
+ ip address add 192.0.2.2/24 dev $rp2
+ ip address add 192.0.3.2/24 dev v1
+
+ ip route add 10.0.0.1/32 vrf vrf-spine nexthop via 192.0.2.1
+ ip route add 10.0.0.2/32 vrf vrf-spine nexthop via 192.0.3.1
+}
+
+spine_destroy()
+{
+ ip route del 10.0.0.2/32 vrf vrf-spine nexthop via 192.0.3.1
+ ip route del 10.0.0.1/32 vrf vrf-spine nexthop via 192.0.2.1
+
+ ip address del 192.0.3.2/24 dev v1
+ ip address del 192.0.2.2/24 dev $rp2
+
+ ip link set dev v1 down
+ ip link set dev $rp2 down
+ vrf_destroy "vrf-spine"
+}
+
+ns_h1_create()
+{
+ hx_create "vrf-h1" w2 10.1.1.102 10.1.1.1
+}
+export -f ns_h1_create
+
+ns_h2_create()
+{
+ hx_create "vrf-h2" w4 10.1.2.102 10.1.2.1
+}
+export -f ns_h2_create
+
+ns_switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1 vlan_default_pvid 0 \
+ mcast_snooping 0
+ ip link set dev br1 up
+
+ ip link set dev v2 up
+ ip address add dev v2 192.0.3.1/24
+ ip route add 10.0.0.1/32 nexthop via 192.0.3.2
+
+ ip link add name vx10 type vxlan id 1000 \
+ local 10.0.0.2 remote 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx10 up
+
+ ip link set dev vx10 master br1
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ ip link add name vx20 type vxlan id 2000 \
+ local 10.0.0.2 remote 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx20 up
+
+ ip link set dev vx20 master br1
+ bridge vlan add vid 20 dev vx20 pvid untagged
+
+ ip link set dev w1 master br1
+ ip link set dev w1 up
+ bridge vlan add vid 10 dev w1 pvid untagged
+
+ ip link set dev w3 master br1
+ ip link set dev w3 up
+ bridge vlan add vid 20 dev w3 pvid untagged
+
+ ip address add 10.0.0.2/32 dev lo
+
+ # Create SVIs
+ vrf_create "vrf-green"
+ ip link set dev vrf-green up
+
+ ip link add link br1 name vlan10 up master vrf-green type vlan id 10
+ ip address add 10.1.1.12/24 dev vlan10
+ ip link add link vlan10 name vlan10-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.1.1/24 dev vlan10-v
+
+ ip link add link br1 name vlan20 up master vrf-green type vlan id 20
+ ip address add 10.1.2.12/24 dev vlan20
+ ip link add link vlan20 name vlan20-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.2.1/24 dev vlan20-v
+
+ bridge vlan add vid 10 dev br1 self
+ bridge vlan add vid 20 dev br1 self
+
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
+}
+export -f ns_switch_create
+
+ns_init()
+{
+ ip link add name w1 type veth peer name w2
+ ip link add name w3 type veth peer name w4
+
+ ip link set dev lo up
+
+ ns_h1_create
+ ns_h2_create
+ ns_switch_create
+}
+export -f ns_init
+
+ns1_create()
+{
+ ip netns add ns1
+ ip link set dev v2 netns ns1
+ in_ns ns1 ns_init
+}
+
+ns1_destroy()
+{
+ ip netns exec ns1 ip link set dev v2 netns 1
+ ip netns del ns1
+}
+
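+# Statically install each remote host's MAC: as an extern_learn FDB entry
+# on the VXLAN device and as a noarp neighbour on the SVI. This stands in
+# for the control plane (e.g. BGP EVPN) that would normally distribute
+# these entries.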
+macs_populate()
+{
+ local mac1=$1; shift
+ local mac2=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
+ local dst=$1; shift
+
+ bridge fdb add $mac1 dev vx10 self master extern_learn static \
+ dst $dst vlan 10
+ bridge fdb add $mac2 dev vx20 self master extern_learn static \
+ dst $dst vlan 20
+
+ ip neigh add $ip1 lladdr $mac1 nud noarp dev vlan10 \
+ extern_learn
+ ip neigh add $ip2 lladdr $mac2 nud noarp dev vlan20 \
+ extern_learn
+}
+export -f macs_populate
+
+macs_initialize()
+{
+ local h1_ns_mac=$(in_ns ns1 mac_get w2)
+ local h2_ns_mac=$(in_ns ns1 mac_get w4)
+ local h1_mac=$(mac_get $h1)
+ local h2_mac=$(mac_get $h2)
+
+ macs_populate $h1_ns_mac $h2_ns_mac 10.1.1.102 10.1.2.102 10.0.0.2
+ in_ns ns1 macs_populate $h1_mac $h2_mac 10.1.1.101 10.1.2.101 10.0.0.1
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ rp1=${NETIFS[p5]}
+ rp2=${NETIFS[p6]}
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+ switch_create
+
+ ip link add name v1 type veth peer name v2
+ spine_create
+ ns1_create
+
+ macs_initialize
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ns1_destroy
+ spine_destroy
+ ip link del dev v1
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 10.1.2.101 ": local->local vid 10->vid 20"
+ ping_test $h1 10.1.1.102 ": local->remote vid 10->vid 10"
+ ping_test $h2 10.1.2.102 ": local->remote vid 20->vid 20"
+ ping_test $h1 10.1.2.102 ": local->remote vid 10->vid 20"
+ ping_test $h2 10.1.1.102 ": local->remote vid 20->vid 10"
+}
+
+arp_decap()
+{
+ # Repeat the ping tests, but without populating the neighbours. This
+ # makes sure we correctly decapsulate ARP packets.
+ log_info "deleting neighbours from vlan interfaces"
+
+ ip neigh del 10.1.1.102 dev vlan10
+ ip neigh del 10.1.2.102 dev vlan20
+
+ ping_ipv4
+
+ ip neigh replace 10.1.1.102 lladdr $(in_ns ns1 mac_get w2) nud noarp \
+ dev vlan10 extern_learn
+ ip neigh replace 10.1.2.102 lladdr $(in_ns ns1 mac_get w4) nud noarp \
+ dev vlan20 extern_learn
+}
+
+arp_suppression_compare()
+{
+ local expect=$1; shift
+ local actual=$(in_ns ns1 tc_rule_stats_get vx10 1 ingress)
+
+ (( expect == actual ))
+ check_err $? "expected $expect ARP requests, got $actual"
+}
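+
+# Note that the ingress filter counter in NS1 is never reset, so the
+# expected values passed below (0, 1, 2, 3) are cumulative across the four
+# subtests.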
+
+arp_suppression()
+{
+ ip link set dev vx10 type bridge_slave neigh_suppress on
+
+ in_ns ns1 tc qdisc add dev vx10 clsact
+ in_ns ns1 tc filter add dev vx10 ingress proto arp pref 1 handle 101 \
+ flower dst_mac ff:ff:ff:ff:ff:ff arp_tip 10.1.1.102 arp_op \
+ request action pass
+
+ # The neighbour is configured on the SVI and ARP suppression is on, so
+ # the ARP request should be suppressed.
+ RET=0
+
+ $ARPING -I $h1 -fqb -c 1 -w 1 10.1.1.102
+ check_err $? "arping failed"
+
+ arp_suppression_compare 0
+
+ log_test "neigh_suppress: on / neigh exists: yes"
+
+ # Delete the neighbour from the SVI. A single ARP request should be
+ # received by the remote VTEP.
+ RET=0
+
+ ip neigh del 10.1.1.102 dev vlan10
+
+ $ARPING -I $h1 -fqb -c 1 -w 1 10.1.1.102
+ check_err $? "arping failed"
+
+ arp_suppression_compare 1
+
+ log_test "neigh_suppress: on / neigh exists: no"
+
+ # Turn off ARP suppression and make sure ARP is not suppressed,
+ # regardless of neighbour existence on the SVI.
+ RET=0
+
+ ip neigh del 10.1.1.102 dev vlan10 &> /dev/null
+ ip link set dev vx10 type bridge_slave neigh_suppress off
+
+ $ARPING -I $h1 -fqb -c 1 -w 1 10.1.1.102
+ check_err $? "arping failed"
+
+ arp_suppression_compare 2
+
+ log_test "neigh_suppress: off / neigh exists: no"
+
+ RET=0
+
+ ip neigh add 10.1.1.102 lladdr $(in_ns ns1 mac_get w2) nud noarp \
+ dev vlan10 extern_learn
+
+ $ARPING -I $h1 -fqb -c 1 -w 1 10.1.1.102
+ check_err $? "arping failed"
+
+ arp_suppression_compare 3
+
+ log_test "neigh_suppress: off / neigh exists: yes"
+
+ in_ns ns1 tc qdisc del dev vx10 clsact
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
new file mode 100755
index 000000000000..1209031bc794
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
@@ -0,0 +1,551 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +---------------------------+ +------------------------------+
+# | vrf-h1 | | vrf-h2 |
+# | + $h1 | | + $h2 |
+# | | 10.1.1.101/24 | | | 10.1.2.101/24 |
+# | | default via 10.1.1.1 | | | default via 10.1.2.1 |
+# +----|----------------------+ +----|-------------------------+
+# | |
+# +----|--------------------------------------------|-------------------------+
+# | SW | | |
+# | +--|--------------------------------------------|-----------------------+ |
+# | | + $swp1 br1 + $swp2 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + vx10 + vx20 | |
+# | | local 10.0.0.1 local 10.0.0.1 | |
+# | | remote 10.0.0.2 remote 10.0.0.2 | |
+# | | id 1010 id 1020 | |
+# | | dstport 4789 dstport 4789 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + vx4001 | |
+# | | local 10.0.0.1 | |
+# | | remote 10.0.0.2 | |
+# | | id 104001 | |
+# | | dstport 4789 | |
+# | | vid 4001 pvid untagged | |
+# | | | |
+# | +-----------------------------------+-----------------------------------+ |
+# | | |
+# | +-----------------------------------|-----------------------------------+ |
+# | | | | |
+# | | +--------------------------------+--------------------------------+ | |
+# | | | | | | |
+# | | + vlan10 | vlan20 + | |
+# | | | 10.1.1.11/24 | 10.1.2.11/24 | | |
+# | | | | | | |
+# | | + vlan10-v (macvlan) + vlan20-v (macvlan) + | |
+# | | 10.1.1.1/24 vlan4001 10.1.2.1/24 | |
+# | | 00:00:5e:00:01:01 00:00:5e:00:01:01 | |
+# | | vrf-green | |
+# | +-----------------------------------------------------------------------+ |
+# | |
+# | + $rp1 +lo |
+# | | 192.0.2.1/24 10.0.0.1/32 |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|--------------------------------------------------------+
+# | | vrf-spine |
+# | + $rp2 |
+# | 192.0.2.2/24 |
+# | | (maybe) HW
+# =============================================================================
+# | | (likely) SW
+# | |
+# | + v1 (veth) |
+# | | 192.0.3.2/24 |
+# +----|--------------------------------------------------------+
+# |
+# +----|----------------------------------------------------------------------+
+# | + v2 (veth) +lo NS1 (netns) |
+# | 192.0.3.1/24 10.0.0.2/32 |
+# | |
+# | +-----------------------------------------------------------------------+ |
+# | | vrf-green | |
+# | | + vlan10-v (macvlan) vlan20-v (macvlan) + | |
+# | | | 10.1.1.1/24 10.1.2.1/24 | | |
+# | | | 00:00:5e:00:01:01 00:00:5e:00:01:01 | | |
+# | | | vlan4001 | | |
+# | | + vlan10 + vlan20 + | |
+# | | | 10.1.1.12/24 | 10.1.2.12/24 | | |
+# | | | | | | |
+# | | +--------------------------------+--------------------------------+ | |
+# | | | | |
+# | +-----------------------------------|-----------------------------------+ |
+# | | |
+# | +-----------------------------------+-----------------------------------+ |
+# | | | |
+# | | + vx10 + vx20 | |
+# | | local 10.0.0.2 local 10.0.0.2 | |
+# | | remote 10.0.0.1 remote 10.0.0.1 | |
+# | | id 1010 id 1020 | |
+# | | dstport 4789 dstport 4789 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + vx4001 | |
+# | | local 10.0.0.2 | |
+# | | remote 10.0.0.1 | |
+# | | id 104001 | |
+# | | dstport 4789 | |
+# | | vid 4001 pvid untagged | |
+# | | | |
+# | | + w1 (veth) + w3 (veth) | |
+# | | | vid 10 pvid untagged br1 | vid 20 pvid untagged | |
+# | +--|------------------------------------------|-------------------------+ |
+# | | | |
+# | | | |
+# | +--|----------------------+ +--|-------------------------+ |
+# | | | vrf-h1 | | | vrf-h2 | |
+# | | + w2 (veth) | | + w4 (veth) | |
+# | | 10.1.1.102/24 | | 10.1.2.102/24 | |
+# | | default via 10.1.1.1 | | default via 10.1.2.1 | |
+# | +-------------------------+ +----------------------------+ |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+"
+NUM_NETIFS=6
+source lib.sh
+
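+# hx_create: move $if_name into a dedicated VRF and configure the host side:
+# an address, a static neighbor entry for the anycast gateway MAC
+# (00:00:5e:00:01:01) and a default route via the gateway.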
+hx_create()
+{
+ local vrf_name=$1; shift
+ local if_name=$1; shift
+ local ip_addr=$1; shift
+ local gw_ip=$1; shift
+
+ vrf_create $vrf_name
+ ip link set dev $if_name master $vrf_name
+ ip link set dev $vrf_name up
+ ip link set dev $if_name up
+
+ ip address add $ip_addr/24 dev $if_name
+ ip neigh replace $gw_ip lladdr 00:00:5e:00:01:01 nud permanent \
+ dev $if_name
+ ip route add default vrf $vrf_name nexthop via $gw_ip
+}
+export -f hx_create
+
+hx_destroy()
+{
+ local vrf_name=$1; shift
+ local if_name=$1; shift
+ local ip_addr=$1; shift
+ local gw_ip=$1; shift
+
+ ip route del default vrf $vrf_name nexthop via $gw_ip
+ ip neigh del $gw_ip dev $if_name
+ ip address del $ip_addr/24 dev $if_name
+
+ ip link set dev $if_name down
+ vrf_destroy $vrf_name
+}
+
+h1_create()
+{
+ hx_create "vrf-h1" $h1 10.1.1.101 10.1.1.1
+}
+
+h1_destroy()
+{
+ hx_destroy "vrf-h1" $h1 10.1.1.101 10.1.1.1
+}
+
+h2_create()
+{
+ hx_create "vrf-h2" $h2 10.1.2.101 10.1.2.1
+}
+
+h2_destroy()
+{
+ hx_destroy "vrf-h2" $h2 10.1.2.101 10.1.2.1
+}
+
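+# switch_create: build the local VTEP. vx10/vx20 are the L2 VNIs bridged
+# with $swp1/$swp2, vx4001 is the L3 VNI used for routing between the
+# VTEPs, and the vlan10/vlan20 SVIs with their anycast gateway macvlans
+# live in vrf-green.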
+switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1 vlan_default_pvid 0 \
+ mcast_snooping 0
+	# Make sure the bridge uses the MAC address of the local port and not
+	# that of the VxLAN device.
+ ip link set dev br1 address $(mac_get $swp1)
+ ip link set dev br1 up
+
+ ip link set dev $rp1 up
+ ip address add dev $rp1 192.0.2.1/24
+ ip route add 10.0.0.2/32 nexthop via 192.0.2.2
+
+ ip link add name vx10 type vxlan id 1010 \
+ local 10.0.0.1 remote 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx10 up
+
+ ip link set dev vx10 master br1
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ ip link add name vx20 type vxlan id 1020 \
+ local 10.0.0.1 remote 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx20 up
+
+ ip link set dev vx20 master br1
+ bridge vlan add vid 20 dev vx20 pvid untagged
+
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+ bridge vlan add vid 10 dev $swp1 pvid untagged
+
+ ip link set dev $swp2 master br1
+ ip link set dev $swp2 up
+ bridge vlan add vid 20 dev $swp2 pvid untagged
+
+ ip link add name vx4001 type vxlan id 104001 \
+ local 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx4001 up
+
+ ip link set dev vx4001 master br1
+ bridge vlan add vid 4001 dev vx4001 pvid untagged
+
+ ip address add 10.0.0.1/32 dev lo
+
+ # Create SVIs
+ vrf_create "vrf-green"
+ ip link set dev vrf-green up
+
+ ip link add link br1 name vlan10 up master vrf-green type vlan id 10
+ ip address add 10.1.1.11/24 dev vlan10
+ ip link add link vlan10 name vlan10-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.1.1/24 dev vlan10-v
+
+ ip link add link br1 name vlan20 up master vrf-green type vlan id 20
+ ip address add 10.1.2.11/24 dev vlan20
+ ip link add link vlan20 name vlan20-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.2.1/24 dev vlan20-v
+
+ ip link add link br1 name vlan4001 up master vrf-green \
+ type vlan id 4001
+
+ bridge vlan add vid 10 dev br1 self
+ bridge vlan add vid 20 dev br1 self
+ bridge vlan add vid 4001 dev br1 self
+
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
+}
+
+switch_destroy()
+{
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
+
+ bridge vlan del vid 4001 dev br1 self
+ bridge vlan del vid 20 dev br1 self
+ bridge vlan del vid 10 dev br1 self
+
+ ip link del dev vlan4001
+
+ ip link del dev vlan20
+
+ ip link del dev vlan10
+
+ vrf_destroy "vrf-green"
+
+ ip address del 10.0.0.1/32 dev lo
+
+ bridge vlan del vid 20 dev $swp2
+ ip link set dev $swp2 down
+ ip link set dev $swp2 nomaster
+
+ bridge vlan del vid 10 dev $swp1
+ ip link set dev $swp1 down
+ ip link set dev $swp1 nomaster
+
+ bridge vlan del vid 4001 dev vx4001
+ ip link set dev vx4001 nomaster
+
+ ip link set dev vx4001 down
+ ip link del dev vx4001
+
+ bridge vlan del vid 20 dev vx20
+ ip link set dev vx20 nomaster
+
+ ip link set dev vx20 down
+ ip link del dev vx20
+
+ bridge vlan del vid 10 dev vx10
+ ip link set dev vx10 nomaster
+
+ ip link set dev vx10 down
+ ip link del dev vx10
+
+ ip route del 10.0.0.2/32 nexthop via 192.0.2.2
+ ip address del dev $rp1 192.0.2.1/24
+ ip link set dev $rp1 down
+
+ ip link set dev br1 down
+ ip link del dev br1
+}
+
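+# spine_create: the spine merely routes between the two VTEP loopbacks,
+# 10.0.0.1 (towards $rp1) and 10.0.0.2 (towards v2 in ns1), in its own VRF.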
+spine_create()
+{
+ vrf_create "vrf-spine"
+ ip link set dev $rp2 master vrf-spine
+ ip link set dev v1 master vrf-spine
+ ip link set dev vrf-spine up
+ ip link set dev $rp2 up
+ ip link set dev v1 up
+
+ ip address add 192.0.2.2/24 dev $rp2
+ ip address add 192.0.3.2/24 dev v1
+
+ ip route add 10.0.0.1/32 vrf vrf-spine nexthop via 192.0.2.1
+ ip route add 10.0.0.2/32 vrf vrf-spine nexthop via 192.0.3.1
+}
+
+spine_destroy()
+{
+ ip route del 10.0.0.2/32 vrf vrf-spine nexthop via 192.0.3.1
+ ip route del 10.0.0.1/32 vrf vrf-spine nexthop via 192.0.2.1
+
+ ip address del 192.0.3.2/24 dev v1
+ ip address del 192.0.2.2/24 dev $rp2
+
+ ip link set dev v1 down
+ ip link set dev $rp2 down
+ vrf_destroy "vrf-spine"
+}
+
+ns_h1_create()
+{
+ hx_create "vrf-h1" w2 10.1.1.102 10.1.1.1
+}
+export -f ns_h1_create
+
+ns_h2_create()
+{
+ hx_create "vrf-h2" w4 10.1.2.102 10.1.2.1
+}
+export -f ns_h2_create
+
+ns_switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1 vlan_default_pvid 0 \
+ mcast_snooping 0
+ ip link set dev br1 up
+
+ ip link set dev v2 up
+ ip address add dev v2 192.0.3.1/24
+ ip route add 10.0.0.1/32 nexthop via 192.0.3.2
+
+ ip link add name vx10 type vxlan id 1010 \
+ local 10.0.0.2 remote 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx10 up
+
+ ip link set dev vx10 master br1
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ ip link add name vx20 type vxlan id 1020 \
+ local 10.0.0.2 remote 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx20 up
+
+ ip link set dev vx20 master br1
+ bridge vlan add vid 20 dev vx20 pvid untagged
+
+ ip link add name vx4001 type vxlan id 104001 \
+ local 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx4001 up
+
+ ip link set dev vx4001 master br1
+ bridge vlan add vid 4001 dev vx4001 pvid untagged
+
+ ip link set dev w1 master br1
+ ip link set dev w1 up
+ bridge vlan add vid 10 dev w1 pvid untagged
+
+ ip link set dev w3 master br1
+ ip link set dev w3 up
+ bridge vlan add vid 20 dev w3 pvid untagged
+
+ ip address add 10.0.0.2/32 dev lo
+
+ # Create SVIs
+ vrf_create "vrf-green"
+ ip link set dev vrf-green up
+
+ ip link add link br1 name vlan10 up master vrf-green type vlan id 10
+ ip address add 10.1.1.12/24 dev vlan10
+ ip link add link vlan10 name vlan10-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.1.1/24 dev vlan10-v
+
+ ip link add link br1 name vlan20 up master vrf-green type vlan id 20
+ ip address add 10.1.2.12/24 dev vlan20
+ ip link add link vlan20 name vlan20-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.2.1/24 dev vlan20-v
+
+ ip link add link br1 name vlan4001 up master vrf-green \
+ type vlan id 4001
+
+ bridge vlan add vid 10 dev br1 self
+ bridge vlan add vid 20 dev br1 self
+ bridge vlan add vid 4001 dev br1 self
+
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
+}
+export -f ns_switch_create
+
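+# ns_init: replicate the hosts and the VTEP inside the namespace, with the
+# w1/w2 and w3/w4 veth pairs standing in for physical ports.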
+ns_init()
+{
+ ip link add name w1 type veth peer name w2
+ ip link add name w3 type veth peer name w4
+
+ ip link set dev lo up
+
+ ns_h1_create
+ ns_h2_create
+ ns_switch_create
+}
+export -f ns_init
+
+ns1_create()
+{
+ ip netns add ns1
+ ip link set dev v2 netns ns1
+ in_ns ns1 ns_init
+}
+
+ns1_destroy()
+{
+ ip netns exec ns1 ip link set dev v2 netns 1
+ ip netns del ns1
+}
+
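+# Learning is disabled on the VxLAN devices ("nolearning"), so seed the
+# remote FDB and neighbor entries statically with extern_learn, much as an
+# EVPN control plane would.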
+__l2_vni_init()
+{
+ local mac1=$1; shift
+ local mac2=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
+ local dst=$1; shift
+
+ bridge fdb add $mac1 dev vx10 self master extern_learn static \
+ dst $dst vlan 10
+ bridge fdb add $mac2 dev vx20 self master extern_learn static \
+ dst $dst vlan 20
+
+ ip neigh add $ip1 lladdr $mac1 nud noarp dev vlan10 \
+ extern_learn
+ ip neigh add $ip2 lladdr $mac2 nud noarp dev vlan20 \
+ extern_learn
+}
+export -f __l2_vni_init
+
+l2_vni_init()
+{
+ local h1_ns_mac=$(in_ns ns1 mac_get w2)
+ local h2_ns_mac=$(in_ns ns1 mac_get w4)
+ local h1_mac=$(mac_get $h1)
+ local h2_mac=$(mac_get $h2)
+
+ __l2_vni_init $h1_ns_mac $h2_ns_mac 10.1.1.102 10.1.2.102 10.0.0.2
+ in_ns ns1 __l2_vni_init $h1_mac $h2_mac 10.1.1.101 10.1.2.101 10.0.0.1
+}
+
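+# __l3_vni_init: install the peer's router MAC (its vlan4001 address) as a
+# static FDB/neighbor entry on the L3 VNI, and point host routes for the
+# remote hosts at the peer VTEP ("onlink", as vlan4001 carries no subnet).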
+__l3_vni_init()
+{
+ local mac=$1; shift
+ local vtep_ip=$1; shift
+ local host1_ip=$1; shift
+ local host2_ip=$1; shift
+
+ bridge fdb add $mac dev vx4001 self master extern_learn static \
+ dst $vtep_ip vlan 4001
+
+ ip neigh add $vtep_ip lladdr $mac nud noarp dev vlan4001 extern_learn
+
+ ip route add $host1_ip/32 vrf vrf-green nexthop via $vtep_ip \
+ dev vlan4001 onlink
+ ip route add $host2_ip/32 vrf vrf-green nexthop via $vtep_ip \
+ dev vlan4001 onlink
+}
+export -f __l3_vni_init
+
+l3_vni_init()
+{
+ local vlan4001_ns_mac=$(in_ns ns1 mac_get vlan4001)
+ local vlan4001_mac=$(mac_get vlan4001)
+
+ __l3_vni_init $vlan4001_ns_mac 10.0.0.2 10.1.1.102 10.1.2.102
+ in_ns ns1 __l3_vni_init $vlan4001_mac 10.0.0.1 10.1.1.101 10.1.2.101
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ rp1=${NETIFS[p5]}
+ rp2=${NETIFS[p6]}
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+ switch_create
+
+ ip link add name v1 type veth peer name v2
+ spine_create
+ ns1_create
+
+ l2_vni_init
+ l3_vni_init
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ns1_destroy
+ spine_destroy
+ ip link del dev v1
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+}
+
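+# local->local traffic is routed between the SVIs; same-VLAN traffic to the
+# remote hosts is bridged over the L2 VNIs; cross-VLAN traffic to the
+# remote hosts is routed via the L3 VNI.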
+ping_ipv4()
+{
+ ping_test $h1 10.1.2.101 ": local->local vid 10->vid 20"
+ ping_test $h1 10.1.1.102 ": local->remote vid 10->vid 10"
+ ping_test $h2 10.1.2.102 ": local->remote vid 20->vid 20"
+ ping_test $h1 10.1.2.102 ": local->remote vid 10->vid 20"
+ ping_test $h2 10.1.1.102 ": local->remote vid 20->vid 10"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/ip_defrag.c b/tools/testing/selftests/net/ip_defrag.c
index 5d56cc0838f6..c0c9ecb891e1 100644
--- a/tools/testing/selftests/net/ip_defrag.c
+++ b/tools/testing/selftests/net/ip_defrag.c
@@ -20,6 +20,7 @@ static bool cfg_do_ipv4;
static bool cfg_do_ipv6;
static bool cfg_verbose;
static bool cfg_overlap;
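+/* -p: in overlap mode, also accept a successfully reassembled datagram. */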
+static bool cfg_permissive;
static unsigned short cfg_port = 9000;
const struct in_addr addr4 = { .s_addr = __constant_htonl(INADDR_LOOPBACK + 2) };
@@ -35,7 +36,7 @@ const struct in6_addr addr6 = IN6ADDR_LOOPBACK_INIT;
static int payload_len;
static int max_frag_len;
-#define MSG_LEN_MAX 60000 /* Max UDP payload length. */
+#define MSG_LEN_MAX 10000 /* Max UDP payload length. */
#define IP4_MF (1u << 13) /* IPv4 MF flag. */
#define IP6_MF (1) /* IPv6 MF flag. */
@@ -59,13 +60,14 @@ static void recv_validate_udp(int fd_udp)
msg_counter++;
if (cfg_overlap) {
- if (ret != -1)
- error(1, 0, "recv: expected timeout; got %d",
- (int)ret);
- if (errno != ETIMEDOUT && errno != EAGAIN)
- error(1, errno, "recv: expected timeout: %d",
- errno);
- return; /* OK */
+ if (ret == -1 && (errno == ETIMEDOUT || errno == EAGAIN))
+ return; /* OK */
+ if (!cfg_permissive) {
+ if (ret != -1)
+ error(1, 0, "recv: expected timeout; got %d",
+ (int)ret);
+ error(1, errno, "recv: expected timeout: %d", errno);
+ }
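+		/* Permissive mode: the datagram was reassembled despite the
+		 * overlap, so fall through and validate it as usual.
+		 */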
}
if (ret == -1)
@@ -203,7 +205,6 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
{
struct ip *iphdr = (struct ip *)ip_frame;
struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame;
- const bool ipv4 = !ipv6;
int res;
int offset;
int frag_len;
@@ -251,7 +252,7 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
}
/* Occasionally test IPv4 "runs" (see net/ipv4/ip_fragment.c) */
- if (ipv4 && !cfg_overlap && (rand() % 100 < 20) &&
+ if (!cfg_overlap && (rand() % 100 < 20) &&
(payload_len > 9 * max_frag_len)) {
offset = 6 * max_frag_len;
while (offset < (UDP_HLEN + payload_len)) {
@@ -276,41 +277,38 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
while (offset < (UDP_HLEN + payload_len)) {
send_fragment(fd_raw, addr, alen, offset, ipv6);
/* IPv4 ignores duplicates, so randomly send a duplicate. */
- if (ipv4 && (1 == rand() % 100))
+ if (rand() % 100 == 1)
send_fragment(fd_raw, addr, alen, offset, ipv6);
offset += 2 * max_frag_len;
}
if (cfg_overlap) {
- /* Send an extra random fragment. */
+ /* Send an extra random fragment.
+ *
+ * Duplicates and some fragments completely inside
+ * previously sent fragments are dropped/ignored. So
+ * random offset and frag_len can result in a dropped
+ * fragment instead of a dropped queue/packet. Thus we
+ * hard-code offset and frag_len.
+ */
+ if (max_frag_len * 4 < payload_len || max_frag_len < 16) {
+ /* not enough payload for random offset and frag_len. */
+ offset = 8;
+ frag_len = UDP_HLEN + max_frag_len;
+ } else {
+ offset = rand() % (payload_len / 2);
+ frag_len = 2 * max_frag_len + 1 + rand() % 256;
+ }
if (ipv6) {
struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN);
/* sendto() returns EINVAL if offset + frag_len is too small. */
- offset = rand() % (UDP_HLEN + payload_len - 1);
- frag_len = max_frag_len + rand() % 256;
/* In IPv6 if !!(frag_len % 8), the fragment is dropped. */
frag_len &= ~0x7;
fraghdr->ip6f_offlg = htons(offset / 8 | IP6_MF);
ip6hdr->ip6_plen = htons(frag_len);
frag_len += IP6_HLEN;
} else {
- /* In IPv4, duplicates and some fragments completely inside
- * previously sent fragments are dropped/ignored. So
- * random offset and frag_len can result in a dropped
- * fragment instead of a dropped queue/packet. So we
- * hard-code offset and frag_len.
- *
- * See ade446403bfb ("net: ipv4: do not handle duplicate
- * fragments as overlapping").
- */
- if (max_frag_len * 4 < payload_len || max_frag_len < 16) {
- /* not enough payload to play with random offset and frag_len. */
- offset = 8;
- frag_len = IP4_HLEN + UDP_HLEN + max_frag_len;
- } else {
- offset = rand() % (payload_len / 2);
- frag_len = 2 * max_frag_len + 1 + rand() % 256;
- }
+ frag_len += IP4_HLEN;
iphdr->ip_off = htons(offset / 8 | IP4_MF);
iphdr->ip_len = htons(frag_len);
}
@@ -327,7 +325,7 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
while (offset < (UDP_HLEN + payload_len)) {
send_fragment(fd_raw, addr, alen, offset, ipv6);
/* IPv4 ignores duplicates, so randomly send a duplicate. */
- if (ipv4 && (1 == rand() % 100))
+ if (rand() % 100 == 1)
send_fragment(fd_raw, addr, alen, offset, ipv6);
offset += 2 * max_frag_len;
}
@@ -342,7 +340,7 @@ static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6)
*/
struct timeval tv = { .tv_sec = 1, .tv_usec = 10 };
int idx;
- int min_frag_len = ipv6 ? 1280 : 8;
+ int min_frag_len = 8;
/* Initialize the payload. */
for (idx = 0; idx < MSG_LEN_MAX; ++idx)
@@ -434,7 +432,7 @@ static void parse_opts(int argc, char **argv)
{
int c;
- while ((c = getopt(argc, argv, "46ov")) != -1) {
+ while ((c = getopt(argc, argv, "46opv")) != -1) {
switch (c) {
case '4':
cfg_do_ipv4 = true;
@@ -445,6 +443,9 @@ static void parse_opts(int argc, char **argv)
case 'o':
cfg_overlap = true;
break;
+ case 'p':
+ cfg_permissive = true;
+ break;
case 'v':
cfg_verbose = true;
break;
diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh
index 7dd79a9efb17..15d3489ecd9c 100755
--- a/tools/testing/selftests/net/ip_defrag.sh
+++ b/tools/testing/selftests/net/ip_defrag.sh
@@ -20,6 +20,10 @@ setup() {
ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1
ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_time=1 >/dev/null 2>&1
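+	# Mirror the ip6frag limits for the nf_conntrack IPv6 reassembly path.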
+ ip netns exec "${NETNS}" sysctl -w net.netfilter.nf_conntrack_frag6_high_thresh=9000000 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.netfilter.nf_conntrack_frag6_low_thresh=7000000 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.netfilter.nf_conntrack_frag6_timeout=1 >/dev/null 2>&1
+
# DST cache can get full with a lot of frags, with GC not keeping up with the test.
ip netns exec "${NETNS}" sysctl -w net.ipv6.route.max_size=65536 >/dev/null 2>&1
}
@@ -43,4 +47,16 @@ ip netns exec "${NETNS}" ./ip_defrag -6
echo "ipv6 defrag with overlaps"
ip netns exec "${NETNS}" ./ip_defrag -6o
+# Insert an nf_conntrack rule so that the codepath in nf_conntrack_reasm.c
+# is taken.
+ip netns exec "${NETNS}" ip6tables -A INPUT -m conntrack --ctstate INVALID -j ACCEPT
+
+echo "ipv6 nf_conntrack defrag"
+ip netns exec "${NETNS}" ./ip_defrag -6
+
+echo "ipv6 nf_conntrack defrag with overlaps"
+# netfilter will drop some invalid packets, so run the test in permissive
+# mode: the test also passes if the datagram is correctly reassembled even
+# though we sent an overlapping fragment.
+ip netns exec "${NETNS}" ./ip_defrag -6op
+
echo "all tests done"
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index fac68d710f35..ff68ed19c0ef 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -452,10 +452,12 @@ TEST_F(tls, recv_partial)
memset(recv_mem, 0, sizeof(recv_mem));
EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
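+	/* Without MSG_WAITALL, recv() may return fewer bytes than requested,
+	 * making the memcmp below fail spuriously.
+	 */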
- EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first), 0), -1);
+ EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first),
+ MSG_WAITALL), -1);
EXPECT_EQ(memcmp(test_str_first, recv_mem, strlen(test_str_first)), 0);
memset(recv_mem, 0, sizeof(recv_mem));
- EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second), 0), -1);
+ EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second),
+ MSG_WAITALL), -1);
EXPECT_EQ(memcmp(test_str_second, recv_mem, strlen(test_str_second)),
0);
}
@@ -565,10 +567,10 @@ TEST_F(tls, recv_peek_large_buf_mult_recs)
len = strlen(test_str_second) + 1;
EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);
- len = sizeof(buf);
+ len = strlen(test_str) + 1;
memset(buf, 0, len);
- EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1);
-
+ EXPECT_NE((len = recv(self->cfd, buf, len,
+ MSG_PEEK | MSG_WAITALL)), -1);
len = strlen(test_str) + 1;
EXPECT_EQ(memcmp(test_str, buf, len), 0);
}