author    Jakub Kicinski <kuba@kernel.org>    2023-02-10 17:51:27 -0800
committer Jakub Kicinski <kuba@kernel.org>    2023-02-10 17:51:27 -0800
commit    de4287336794f49323a5223c8b6e131f4840a866 (patch)
tree      9a2dfaf47fc689cdae5a45a728d0529d27c77d98
parent    Documentation: isdn: correct spelling (diff)
parent    libbpf: Fix alen calculation in libbpf_nla_dump_errormsg() (diff)
Daniel Borkmann says:
====================
pull-request: bpf-next 2023-02-11

We've added 96 non-merge commits during the last 14 day(s) which contain
a total of 152 files changed, 4884 insertions(+), 962 deletions(-).

There is a minor conflict in drivers/net/ethernet/intel/ice/ice_main.c
between commit 5b246e533d01 ("ice: split probe into smaller functions")
from the net-next tree and commit 66c0e13ad236 ("drivers: net: turn on
XDP features") from the bpf-next tree. Remove the hunk given
ice_cfg_netdev() is otherwise there a 2nd time, and add XDP features to
the existing ice_cfg_netdev() one:

  [...]
  ice_set_netdev_features(netdev);
  netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
                         NETDEV_XDP_ACT_XSK_ZEROCOPY;
  ice_set_ops(netdev);
  [...]

Stephen's merge conflict mail:
https://lore.kernel.org/bpf/20230207101951.21a114fa@canb.auug.org.au/

The main changes are:

1) Add support for BPF trampoline on s390x which finally allows to remove
   many test cases from the BPF CI's DENYLIST.s390x, from Ilya Leoshkevich.

2) Add multi-buffer XDP support to ice driver, from Maciej Fijalkowski.

3) Add capability to export the XDP features supported by the NIC.
   Along with that, add a XDP compliance test tool,
   from Lorenzo Bianconi & Marek Majtyka.

4) Add __bpf_kfunc tag for marking kernel functions as kfuncs,
   from David Vernet.

5) Add a deep dive documentation about the verifier's register
   liveness tracking algorithm, from Eduard Zingerman.

6) Fix and follow-up cleanups for resolve_btfids to be compiled
   as a host program to avoid cross compile issues,
   from Jiri Olsa & Ian Rogers.

7) Batch of fixes to the BPF selftest for xdp_hw_metadata which resulted
   when testing on different NICs, from Jesper Dangaard Brouer.

8) Fix libbpf to better detect kernel version code on Debian, from Hao Xiang.

9) Extend libbpf to add an option for when the perf buffer should
   wake up, from Jon Doron.

10) Follow-up fix on xdp_metadata selftest to just consume on TX
    completion, from Stanislav Fomichev.

11) Extend the kfuncs.rst document with description on kfunc
    lifecycle & stability expectations, from David Vernet.

12) Fix bpftool prog profile to skip attaching to offline CPUs,
    from Tonghao Zhang.
====================

Link: https://lore.kernel.org/r/20230211002037.8489-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--  Documentation/bpf/bpf_design_QA.rst | 25
-rw-r--r--  Documentation/bpf/instruction-set.rst | 120
-rw-r--r--  Documentation/bpf/kfuncs.rst | 145
-rw-r--r--  Documentation/bpf/libbpf/libbpf_naming_convention.rst | 6
-rw-r--r--  Documentation/bpf/map_xskmap.rst | 2
-rw-r--r--  Documentation/bpf/ringbuf.rst | 4
-rw-r--r--  Documentation/bpf/verifier.rst | 297
-rw-r--r--  Documentation/conf.py | 3
-rw-r--r--  Documentation/netlink/specs/netdev.yaml | 100
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 715
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 2
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_main.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 3
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 21
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 52
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 408
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 54
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 236
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.h | 75
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 192
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 9
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_xdp.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 8
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 11
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/siena/efx.c | 4
-rw-r--r--  drivers/net/ethernet/socionext/netsec.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 4
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 4
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 2
-rw-r--r--  drivers/net/netdevsim/netdev.c | 1
-rw-r--r--  drivers/net/tun.c | 5
-rw-r--r--  drivers/net/veth.c | 4
-rw-r--r--  drivers/net/virtio_net.c | 8
-rw-r--r--  drivers/net/xen-netfront.c | 2
-rw-r--r--  include/linux/bpf.h | 14
-rw-r--r--  include/linux/btf.h | 23
-rw-r--r--  include/linux/netdevice.h | 4
-rw-r--r--  include/net/xdp.h | 15
-rw-r--r--  include/uapi/linux/bpf.h | 2
-rw-r--r--  include/uapi/linux/netdev.h | 59
-rw-r--r--  kernel/bpf/btf.c | 16
-rw-r--r--  kernel/bpf/cpumask.c | 63
-rw-r--r--  kernel/bpf/devmap.c | 16
-rw-r--r--  kernel/bpf/helpers.c | 38
-rw-r--r--  kernel/bpf/offload.c | 2
-rw-r--r--  kernel/bpf/preload/bpf_preload_kern.c | 6
-rw-r--r--  kernel/bpf/preload/iterators/Makefile | 12
-rw-r--r--  kernel/bpf/preload/iterators/README | 5
-rw-r--r--  kernel/bpf/preload/iterators/iterators.lskel-big-endian.h | 419
-rw-r--r--  kernel/bpf/preload/iterators/iterators.lskel-little-endian.h (renamed from kernel/bpf/preload/iterators/iterators.lskel.h) | 0
-rw-r--r--  kernel/bpf/syscall.c | 23
-rw-r--r--  kernel/cgroup/rstat.c | 4
-rw-r--r--  kernel/kexec_core.c | 3
-rw-r--r--  kernel/trace/bpf_trace.c | 8
-rw-r--r--  net/bpf/test_run.c | 70
-rw-r--r--  net/core/Makefile | 3
-rw-r--r--  net/core/dev.c | 1
-rw-r--r--  net/core/filter.c | 17
-rw-r--r--  net/core/netdev-genl-gen.c | 48
-rw-r--r--  net/core/netdev-genl-gen.h | 23
-rw-r--r--  net/core/netdev-genl.c | 179
-rw-r--r--  net/core/xdp.c | 23
-rw-r--r--  net/ipv4/tcp_bbr.c | 16
-rw-r--r--  net/ipv4/tcp_cong.c | 10
-rw-r--r--  net/ipv4/tcp_cubic.c | 12
-rw-r--r--  net/ipv4/tcp_dctcp.c | 12
-rw-r--r--  net/netfilter/nf_conntrack_bpf.c | 20
-rw-r--r--  net/netfilter/nf_nat_bpf.c | 6
-rw-r--r--  net/xdp/xsk_buff_pool.c | 7
-rw-r--r--  net/xfrm/xfrm_interface_bpf.c | 7
-rw-r--r--  samples/bpf/syscall_tp_kern.c | 14
-rw-r--r--  tools/bpf/bpftool/prog.c | 38
-rw-r--r--  tools/bpf/resolve_btfids/Build | 4
-rw-r--r--  tools/bpf/resolve_btfids/Makefile | 13
-rw-r--r--  tools/bpf/runqslower/Makefile | 2
-rw-r--r--  tools/include/uapi/linux/bpf.h | 6
-rw-r--r--  tools/include/uapi/linux/netdev.h | 59
-rw-r--r--  tools/lib/bpf/bpf_core_read.h | 4
-rw-r--r--  tools/lib/bpf/bpf_helpers.h | 2
-rw-r--r--  tools/lib/bpf/libbpf.c | 46
-rw-r--r--  tools/lib/bpf/libbpf.h | 7
-rw-r--r--  tools/lib/bpf/libbpf_probes.c | 83
-rw-r--r--  tools/lib/bpf/netlink.c | 118
-rw-r--r--  tools/lib/bpf/nlattr.c | 2
-rw-r--r--  tools/lib/bpf/nlattr.h | 12
-rw-r--r--  tools/lib/bpf/usdt.bpf.h | 5
-rw-r--r--  tools/testing/selftests/bpf/.gitignore | 1
-rw-r--r--  tools/testing/selftests/bpf/DENYLIST.s390x | 69
-rw-r--r--  tools/testing/selftests/bpf/Makefile | 33
-rw-r--r--  tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c | 2
-rw-r--r--  tools/testing/selftests/bpf/netcnt_common.h | 6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/attach_probe.c | 10
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_cookie.c | 6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/decap_sanity.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fexit_stress.c | 22
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/kfree_skb.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/kfunc_call.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sk_assign.c | 25
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_lsm.c | 3
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/trampoline_count.c | 16
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c | 14
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/usdt.c | 1
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c | 3
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c | 7
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c | 31
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_info.c | 8
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_metadata.c | 7
-rw-r--r--  tools/testing/selftests/bpf/progs/kfunc_call_test.c | 29
-rw-r--r--  tools/testing/selftests/bpf/progs/lsm.c | 7
-rw-r--r--  tools/testing/selftests/bpf/progs/profiler.inc.h | 62
-rw-r--r--  tools/testing/selftests/bpf/progs/test_attach_probe.c | 11
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sk_assign.c | 11
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c | 3
-rw-r--r--  tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c | 16
-rw-r--r--  tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c | 12
-rw-r--r--  tools/testing/selftests/bpf/progs/test_vmlinux.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c | 8
-rw-r--r--  tools/testing/selftests/bpf/progs/xdp_features.c | 269
-rw-r--r--  tools/testing/selftests/bpf/progs/xdp_hw_metadata.c | 6
-rw-r--r--  tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c | 2
-rw-r--r--  tools/testing/selftests/bpf/test_progs.c | 38
-rw-r--r--  tools/testing/selftests/bpf/test_progs.h | 2
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 4
-rwxr-xr-x  tools/testing/selftests/bpf/test_xdp_features.sh | 107
-rwxr-xr-x  tools/testing/selftests/bpf/vmtest.sh | 2
-rw-r--r--  tools/testing/selftests/bpf/xdp_features.c | 699
-rw-r--r--  tools/testing/selftests/bpf/xdp_features.h | 20
-rw-r--r--  tools/testing/selftests/bpf/xdp_hw_metadata.c | 35
-rw-r--r--  tools/testing/selftests/bpf/xdp_synproxy.c | 1
152 files changed, 4884 insertions(+), 962 deletions(-)
diff --git a/Documentation/bpf/bpf_design_QA.rst b/Documentation/bpf/bpf_design_QA.rst
index cec2371173d7..bfff0e7e37c2 100644
--- a/Documentation/bpf/bpf_design_QA.rst
+++ b/Documentation/bpf/bpf_design_QA.rst
@@ -208,6 +208,10 @@ data structures and compile with kernel internal headers. Both of these
kernel internals are subject to change and can break with newer kernels
such that the program needs to be adapted accordingly.
+New BPF functionality is generally added through the use of kfuncs instead of
+new helpers. Kfuncs are not considered part of the stable API, and have their own
+lifecycle expectations as described in :ref:`BPF_kfunc_lifecycle_expectations`.
+
Q: Are tracepoints part of the stable ABI?
------------------------------------------
A: NO. Tracepoints are tied to internal implementation details hence they are
@@ -236,8 +240,8 @@ A: NO. Classic BPF programs are converted into extend BPF instructions.
Q: Can BPF call arbitrary kernel functions?
-------------------------------------------
-A: NO. BPF programs can only call a set of helper functions which
-is defined for every program type.
+A: NO. BPF programs can only call specific functions exposed as BPF helpers or
+kfuncs. The set of available functions is defined for every program type.
Q: Can BPF overwrite arbitrary kernel memory?
---------------------------------------------
@@ -263,7 +267,12 @@ Q: New functionality via kernel modules?
Q: Can BPF functionality such as new program or map types, new
helpers, etc be added out of kernel module code?
-A: NO.
+A: Yes, through kfuncs and kptrs
+
+The core BPF functionality such as program types, maps and helpers cannot be
+added to by modules. However, modules can expose functionality to BPF programs
+by exporting kfuncs (which may return pointers to module-internal data
+structures as kptrs).
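(As an illustrative sketch, not part of this patch: a module could define and
register such a kfunc roughly as follows, where all ``example_*`` names and the
``bpf_example_obj_get()`` kfunc are hypothetical.)::

    /* Hypothetical module kfunc returning a pointer to module-internal
     * data as a kptr; example_obj_lookup() is a stand-in module helper. */
    __bpf_kfunc struct example_obj *bpf_example_obj_get(u32 id)
    {
            return example_obj_lookup(id);
    }

    BTF_SET8_START(example_kfunc_btf_ids)
    BTF_ID_FLAGS(func, bpf_example_obj_get, KF_ACQUIRE | KF_RET_NULL)
    BTF_SET8_END(example_kfunc_btf_ids)

    static const struct btf_kfunc_id_set example_kfunc_set = {
            .owner = THIS_MODULE,
            .set   = &example_kfunc_btf_ids,
    };

    static int __init example_module_init(void)
    {
            return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
                                             &example_kfunc_set);
    }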
Q: Directly calling kernel function is an ABI?
----------------------------------------------
@@ -278,7 +287,8 @@ kernel functions have already been used by other kernel tcp
cc (congestion-control) implementations. If any of these kernel
functions has changed, both the in-tree and out-of-tree kernel tcp cc
implementations have to be changed. The same goes for the bpf
-programs and they have to be adjusted accordingly.
+programs and they have to be adjusted accordingly. See
+:ref:`BPF_kfunc_lifecycle_expectations` for details.
Q: Attaching to arbitrary kernel functions is an ABI?
-----------------------------------------------------
@@ -340,6 +350,7 @@ compatibility for these features?
A: NO.
-Unlike map value types, there are no stability guarantees for this case. The
-whole API to work with allocated objects and any support for special fields
-inside them is unstable (since it is exposed through kfuncs).
+Unlike map value types, the API to work with allocated objects and any support
+for special fields inside them is exposed through kfuncs, and thus has the same
+lifecycle expectations as the kfuncs themselves. See
+:ref:`BPF_kfunc_lifecycle_expectations` for details.
diff --git a/Documentation/bpf/instruction-set.rst b/Documentation/bpf/instruction-set.rst
index 2d3fe59bd260..af515de5fc38 100644
--- a/Documentation/bpf/instruction-set.rst
+++ b/Documentation/bpf/instruction-set.rst
@@ -7,6 +7,11 @@ eBPF Instruction Set Specification, v1.0
This document specifies version 1.0 of the eBPF instruction set.
+Documentation conventions
+=========================
+
+For brevity, this document uses the type notation "u64", "u32", etc.
+to mean an unsigned integer whose width is the specified number of bits.
Registers and calling convention
================================
@@ -30,20 +35,56 @@ Instruction encoding
eBPF has two instruction encodings:
* the basic instruction encoding, which uses 64 bits to encode an instruction
-* the wide instruction encoding, which appends a second 64-bit immediate value
- (imm64) after the basic instruction for a total of 128 bits.
+* the wide instruction encoding, which appends a second 64-bit immediate (i.e.,
+ constant) value after the basic instruction for a total of 128 bits.
+
+The basic instruction encoding is as follows, where MSB and LSB mean the most significant
+bits and least significant bits, respectively:
+
+============= ======= ======= ======= ============
+32 bits (MSB) 16 bits 4 bits 4 bits 8 bits (LSB)
+============= ======= ======= ======= ============
+imm offset src_reg dst_reg opcode
+============= ======= ======= ======= ============
+
+**imm**
+ signed integer immediate value
-The basic instruction encoding looks as follows:
+**offset**
+ signed integer offset used with pointer arithmetic
-============= ======= =============== ==================== ============
-32 bits (MSB) 16 bits 4 bits 4 bits 8 bits (LSB)
-============= ======= =============== ==================== ============
-immediate offset source register destination register opcode
-============= ======= =============== ==================== ============
+**src_reg**
+ the source register number (0-10), except where otherwise specified
+ (`64-bit immediate instructions`_ reuse this field for other purposes)
+
+**dst_reg**
+ destination register number (0-10)
+
+**opcode**
+ operation to perform
Note that most instructions do not use all of the fields.
Unused fields shall be cleared to zero.
+As discussed below in `64-bit immediate instructions`_, a 64-bit immediate
+instruction uses a 64-bit immediate value that is constructed as follows.
+The 64 bits following the basic instruction contain a pseudo instruction
+using the same format but with opcode, dst_reg, src_reg, and offset all set to zero,
+and imm containing the high 32 bits of the immediate value.
+
+================= ==================
+64 bits (MSB) 64 bits (LSB)
+================= ==================
+basic instruction pseudo instruction
+================= ==================
+
+Thus the 64-bit immediate value is constructed as follows:
+
+ imm64 = (next_imm << 32) | imm
+
+where 'next_imm' refers to the imm value of the pseudo instruction
+following the basic instruction.
+
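As a non-normative sketch, the same reconstruction can be expressed in C using
``struct bpf_insn`` from the UAPI header::

    #include <stdint.h>
    #include <linux/bpf.h>  /* struct bpf_insn */

    /* Rebuild the 64-bit immediate of a BPF_LD | BPF_DW | BPF_IMM
     * instruction: insn[0] is the basic instruction (low 32 bits in
     * imm), insn[1] is the pseudo instruction (high 32 bits in imm). */
    static uint64_t bpf_imm64(const struct bpf_insn *insn)
    {
            return ((uint64_t)(uint32_t)insn[1].imm << 32) |
                   (uint32_t)insn[0].imm;
    }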
Instruction classes
-------------------
@@ -71,27 +112,32 @@ For arithmetic and jump instructions (``BPF_ALU``, ``BPF_ALU64``, ``BPF_JMP`` an
============== ====== =================
4 bits (MSB) 1 bit 3 bits (LSB)
============== ====== =================
-operation code source instruction class
+code source instruction class
============== ====== =================
-The 4th bit encodes the source operand:
+**code**
+ the operation code, whose meaning varies by instruction class
- ====== ===== ========================================
- source value description
- ====== ===== ========================================
- BPF_K 0x00 use 32-bit immediate as source operand
- BPF_X 0x08 use 'src_reg' register as source operand
- ====== ===== ========================================
+**source**
+ the source operand location, which unless otherwise specified is one of:
-The four MSB bits store the operation code.
+ ====== ===== ==============================================
+ source value description
+ ====== ===== ==============================================
+ BPF_K 0x00 use 32-bit 'imm' value as source operand
+ BPF_X 0x08 use 'src_reg' register value as source operand
+ ====== ===== ==============================================
+**instruction class**
+ the instruction class (see `Instruction classes`_)
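For illustration only (not part of the patch), the three opcode fields can be
extracted in C as follows::

    /* Split the 8-bit opcode of an ALU/ALU64/JMP/JMP32 instruction
     * into the fields described above. */
    static void bpf_opcode_fields(uint8_t opcode, uint8_t *code,
                                  uint8_t *source, uint8_t *class)
    {
            *class  = opcode & 0x07;  /* 3 LSB: instruction class */
            *source = opcode & 0x08;  /* BPF_K (0x00) or BPF_X (0x08) */
            *code   = opcode & 0xf0;  /* 4 MSB: operation code */
    }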
Arithmetic instructions
-----------------------
``BPF_ALU`` uses 32-bit wide operands while ``BPF_ALU64`` uses 64-bit wide operands for
otherwise identical operations.
-The 'code' field encodes the operation as below:
+The 'code' field encodes the operation as below, where 'src' and 'dst' refer
+to the values of the source and destination registers, respectively.
======== ===== ==========================================================
code value description
@@ -121,19 +167,21 @@ the destination register is unchanged whereas for ``BPF_ALU`` the upper
``BPF_ADD | BPF_X | BPF_ALU`` means::
- dst_reg = (u32) dst_reg + (u32) src_reg;
+ dst = (u32) ((u32) dst + (u32) src)
+
+where '(u32)' indicates that the upper 32 bits are zeroed.
``BPF_ADD | BPF_X | BPF_ALU64`` means::
- dst_reg = dst_reg + src_reg
+ dst = dst + src
``BPF_XOR | BPF_K | BPF_ALU`` means::
- dst_reg = (u32) dst_reg ^ (u32) imm32
+ dst = (u32) dst ^ (u32) imm32
``BPF_XOR | BPF_K | BPF_ALU64`` means::
- dst_reg = dst_reg ^ imm32
+ dst = dst ^ imm32
Also note that the division and modulo operations are unsigned. Thus, for
``BPF_ALU``, 'imm' is first interpreted as an unsigned 32-bit value, whereas
@@ -167,11 +215,11 @@ Examples:
``BPF_ALU | BPF_TO_LE | BPF_END`` with imm = 16 means::
- dst_reg = htole16(dst_reg)
+ dst = htole16(dst)
``BPF_ALU | BPF_TO_BE | BPF_END`` with imm = 64 means::
- dst_reg = htobe64(dst_reg)
+ dst = htobe64(dst)
Jump instructions
-----------------
@@ -246,15 +294,15 @@ instructions that transfer data between a register and memory.
``BPF_MEM | <size> | BPF_STX`` means::
- *(size *) (dst_reg + off) = src_reg
+ *(size *) (dst + offset) = src
``BPF_MEM | <size> | BPF_ST`` means::
- *(size *) (dst_reg + off) = imm32
+ *(size *) (dst + offset) = imm32
``BPF_MEM | <size> | BPF_LDX`` means::
- dst_reg = *(size *) (src_reg + off)
+ dst = *(size *) (src + offset)
Where size is one of: ``BPF_B``, ``BPF_H``, ``BPF_W``, or ``BPF_DW``.
@@ -288,11 +336,11 @@ BPF_XOR 0xa0 atomic xor
``BPF_ATOMIC | BPF_W | BPF_STX`` with 'imm' = BPF_ADD means::
- *(u32 *)(dst_reg + off16) += src_reg
+ *(u32 *)(dst + offset) += src
``BPF_ATOMIC | BPF_DW | BPF_STX`` with 'imm' = BPF_ADD means::
- *(u64 *)(dst_reg + off16) += src_reg
+ *(u64 *)(dst + offset) += src
In addition to the simple atomic operations, there also is a modifier and
two complex atomic operations:
@@ -307,16 +355,16 @@ BPF_CMPXCHG 0xf0 | BPF_FETCH atomic compare and exchange
The ``BPF_FETCH`` modifier is optional for simple atomic operations, and
always set for the complex atomic operations. If the ``BPF_FETCH`` flag
-is set, then the operation also overwrites ``src_reg`` with the value that
+is set, then the operation also overwrites ``src`` with the value that
was in memory before it was modified.
-The ``BPF_XCHG`` operation atomically exchanges ``src_reg`` with the value
-addressed by ``dst_reg + off``.
+The ``BPF_XCHG`` operation atomically exchanges ``src`` with the value
+addressed by ``dst + offset``.
The ``BPF_CMPXCHG`` operation atomically compares the value addressed by
-``dst_reg + off`` with ``R0``. If they match, the value addressed by
-``dst_reg + off`` is replaced with ``src_reg``. In either case, the
-value that was at ``dst_reg + off`` before the operation is zero-extended
+``dst + offset`` with ``R0``. If they match, the value addressed by
+``dst + offset`` is replaced with ``src``. In either case, the
+value that was at ``dst + offset`` before the operation is zero-extended
and loaded back to ``R0``.
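As a sketch (not part of the patch), the ``BPF_CMPXCHG`` semantics for the
``BPF_DW`` case can be written as plain C, keeping in mind that the real
operation happens as a single atomic step::

    uint64_t *addr = (uint64_t *)(dst + offset);
    uint64_t old = *addr;          /* value before the operation */
    if (old == R0)
            *addr = src;           /* swap in src on match */
    R0 = old;                      /* old value, zero-extended, to R0 */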
64-bit immediate instructions
@@ -329,7 +377,7 @@ There is currently only one such instruction.
``BPF_LD | BPF_DW | BPF_IMM`` means::
- dst_reg = imm64
+ dst = imm64
Legacy BPF Packet access instructions
diff --git a/Documentation/bpf/kfuncs.rst b/Documentation/bpf/kfuncs.rst
index 1a683225d080..ca96ef3f6896 100644
--- a/Documentation/bpf/kfuncs.rst
+++ b/Documentation/bpf/kfuncs.rst
@@ -13,7 +13,7 @@ BPF Kernel Functions or more commonly known as kfuncs are functions in the Linux
kernel which are exposed for use by BPF programs. Unlike normal BPF helpers,
kfuncs do not have a stable interface and can change from one kernel release to
another. Hence, BPF programs need to be updated in response to changes in the
-kernel.
+kernel. See :ref:`BPF_kfunc_lifecycle_expectations` for more information.
2. Defining a kfunc
===================
@@ -41,7 +41,7 @@ An example is given below::
__diag_ignore_all("-Wmissing-prototypes",
"Global kfuncs as their definitions will be in BTF");
- struct task_struct *bpf_find_get_task_by_vpid(pid_t nr)
+ __bpf_kfunc struct task_struct *bpf_find_get_task_by_vpid(pid_t nr)
{
return find_get_task_by_vpid(nr);
}
@@ -66,7 +66,7 @@ kfunc with a __tag, where tag may be one of the supported annotations.
This annotation is used to indicate a memory and size pair in the argument list.
An example is given below::
- void bpf_memzero(void *mem, int mem__sz)
+ __bpf_kfunc void bpf_memzero(void *mem, int mem__sz)
{
...
}
@@ -86,7 +86,7 @@ safety of the program.
An example is given below::
- void *bpf_obj_new(u32 local_type_id__k, ...)
+ __bpf_kfunc void *bpf_obj_new(u32 local_type_id__k, ...)
{
...
}
@@ -125,6 +125,20 @@ flags on a set of kfuncs as follows::
This set encodes the BTF ID of each kfunc listed above, and encodes the flags
along with it. Of course, it is also allowed to specify no flags.
+kfunc definitions should also always be annotated with the ``__bpf_kfunc``
+macro. This prevents issues such as the compiler inlining the kfunc if it's a
+static kernel function, or the function being elided in an LTO build as it's
+not used in the rest of the kernel. Developers should not manually add
+annotations to their kfunc to prevent these issues. If an annotation is
+required to prevent such an issue with your kfunc, it is a bug and should be
+added to the definition of the macro so that other kfuncs are similarly
+protected. An example is given below::
+
+ __bpf_kfunc struct task_struct *bpf_get_task_pid(s32 pid)
+ {
+ ...
+ }
+
2.4.1 KF_ACQUIRE flag
---------------------
@@ -224,6 +238,28 @@ single argument which must be a trusted argument or a MEM_RCU pointer.
The argument may have reference count of 0 and the kfunc must take this
into consideration.
+.. _KF_deprecated_flag:
+
+2.4.9 KF_DEPRECATED flag
+------------------------
+
+The KF_DEPRECATED flag is used for kfuncs which are scheduled to be
+changed or removed in a subsequent kernel release. A kfunc that is
+marked with KF_DEPRECATED should also have any relevant information
+captured in its kernel doc. Such information typically includes the
+kfunc's expected remaining lifespan, a recommendation for new
+functionality that can replace it if any is available, and possibly a
+rationale for why it is being removed.
+
+Note that while on some occasions, a KF_DEPRECATED kfunc may continue to be
+supported and have its KF_DEPRECATED flag removed, it is likely to be far more
+difficult to remove a KF_DEPRECATED flag after it's been added than it is to
+prevent it from being added in the first place. As described in
+:ref:`BPF_kfunc_lifecycle_expectations`, users that rely on specific kfuncs are
+encouraged to make their use-cases known as early as possible, and participate
+in upstream discussions regarding whether to keep, change, deprecate, or remove
+those kfuncs if and when such discussions occur.
+
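As a sketch (not part of this patch), deprecating a kfunc would then amount to
adding the flag in its BTF ID set, e.g. for a hypothetical
``bpf_legacy_task_lookup`` kfunc::

    BTF_SET8_START(example_kfunc_btf_ids)
    BTF_ID_FLAGS(func, bpf_legacy_task_lookup, KF_ACQUIRE | KF_DEPRECATED)
    BTF_SET8_END(example_kfunc_btf_ids)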
2.5 Registering the kfuncs
--------------------------
@@ -290,14 +326,107 @@ In order to accommodate such requirements, the verifier will enforce strict
PTR_TO_BTF_ID type matching if two types have the exact same name, with one
being suffixed with ``___init``.
-3. Core kfuncs
+.. _BPF_kfunc_lifecycle_expectations:
+
+3. kfunc lifecycle expectations
+===============================
+
+kfuncs provide a kernel <-> kernel API, and thus are not bound by any of the
+strict stability restrictions associated with kernel <-> user UAPIs. This means
+they can be thought of as similar to EXPORT_SYMBOL_GPL, and can therefore be
+modified or removed by a maintainer of the subsystem they're defined in when
+it's deemed necessary.
+
+Like any other change to the kernel, maintainers will not change or remove a
+kfunc without having a reasonable justification. Whether or not they'll choose
+to change a kfunc will ultimately depend on a variety of factors, such as how
+widely used the kfunc is, how long the kfunc has been in the kernel, whether an
+alternative kfunc exists, what the norm is in terms of stability for the
+subsystem in question, and of course what the technical cost is of continuing
+to support the kfunc.
+
+There are several implications of this:
+
+a) kfuncs that are widely used or have been in the kernel for a long time will
+ be more difficult to justify being changed or removed by a maintainer. In
+ other words, kfuncs that are known to have a lot of users and provide
+ significant value provide stronger incentives for maintainers to invest the
+ time and complexity in supporting them. It is therefore important for
+ developers that are using kfuncs in their BPF programs to communicate and
+ explain how and why those kfuncs are being used, and to participate in
+ discussions regarding those kfuncs when they occur upstream.
+
+b) Unlike regular kernel symbols marked with EXPORT_SYMBOL_GPL, BPF programs
+ that call kfuncs are generally not part of the kernel tree. This means that
+ refactoring cannot typically change callers in-place when a kfunc changes,
+ as is done for e.g. an upstreamed driver being updated in place when a
+ kernel symbol is changed.
+
+ Unlike with regular kernel symbols, this is expected behavior for BPF
+ symbols, and out-of-tree BPF programs that use kfuncs should be considered
+ relevant to discussions and decisions around modifying and removing those
+ kfuncs. The BPF community will take an active role in participating in
+ upstream discussions when necessary to ensure that the perspectives of such
+ users are taken into account.
+
+c) A kfunc will never have any hard stability guarantees. BPF APIs cannot and
+ will not ever hard-block a change in the kernel purely for stability
+ reasons. That being said, kfuncs are features that are meant to solve
+ problems and provide value to users. The decision of whether to change or
+ remove a kfunc is a multivariate technical decision that is made on a
+ case-by-case basis, and which is informed by data points such as those
+ mentioned above. It is expected that a kfunc being removed or changed with
+ no warning will not be a common occurrence or take place without sound
+ justification, but it is a possibility that must be accepted if one is to
+ use kfuncs.
+
+3.1 kfunc deprecation
+---------------------
+
+As described above, while sometimes a maintainer may find that a kfunc must be
+changed or removed immediately to accommodate some changes in their subsystem,
+usually kfuncs will be able to accommodate a longer and more measured
+deprecation process. For example, if a new kfunc comes along which provides
+superior functionality to an existing kfunc, the existing kfunc may be
+deprecated for some period of time to allow users to migrate their BPF programs
+to use the new one. Or, if a kfunc has no known users, a decision may be made
+to remove the kfunc (without providing an alternative API) after some
+deprecation period so as to provide users with a window to notify the kfunc
+maintainer if it turns out that the kfunc is actually being used.
+
+It's expected that the common case will be that kfuncs will go through a
+deprecation period rather than being changed or removed without warning. As
+described in :ref:`KF_deprecated_flag`, the kfunc framework provides the
+KF_DEPRECATED flag to kfunc developers to signal to users that a kfunc has been
+deprecated. Once a kfunc has been marked with KF_DEPRECATED, the following
+procedure is followed for removal:
+
+1. Any relevant information for deprecated kfuncs is documented in the kfunc's
+ kernel docs. This documentation will typically include the kfunc's expected
+ remaining lifespan, a recommendation for new functionality that can replace
+ the usage of the deprecated function (or an explanation as to why no such
+ replacement exists), etc.
+
+2. The deprecated kfunc is kept in the kernel for some period of time after it
+ was first marked as deprecated. This time period will be chosen on a
+ case-by-case basis, and will typically depend on how widespread the use of
+ the kfunc is, how long it has been in the kernel, and how hard it is to move
+ to alternatives. This deprecation time period is "best effort", and as
+ described :ref:`above<BPF_kfunc_lifecycle_expectations>`, circumstances may
+ sometimes dictate that the kfunc be removed before the full intended
+ deprecation period has elapsed.
+
+3. After the deprecation period the kfunc will be removed. At this point, BPF
+ programs calling the kfunc will be rejected by the verifier.
+
+4. Core kfuncs
==============
The BPF subsystem provides a number of "core" kfuncs that are potentially
applicable to a wide variety of different possible use cases and programs.
Those kfuncs are documented here.
-3.1 struct task_struct * kfuncs
+4.1 struct task_struct * kfuncs
-------------------------------
There are a number of kfuncs that allow ``struct task_struct *`` objects to be
@@ -373,7 +502,7 @@ Here is an example of it being used:
return 0;
}
-3.2 struct cgroup * kfuncs
+4.2 struct cgroup * kfuncs
--------------------------
``struct cgroup *`` objects also have acquire and release functions:
@@ -488,7 +617,7 @@ the verifier. bpf_cgroup_ancestor() can be used as follows:
return 0;
}
-3.3 struct cpumask * kfuncs
+4.3 struct cpumask * kfuncs
---------------------------
BPF provides a set of kfuncs that can be used to query, allocate, mutate, and
diff --git a/Documentation/bpf/libbpf/libbpf_naming_convention.rst b/Documentation/bpf/libbpf/libbpf_naming_convention.rst
index c5ac97f3d4c4..b5b41b61b3c0 100644
--- a/Documentation/bpf/libbpf/libbpf_naming_convention.rst
+++ b/Documentation/bpf/libbpf/libbpf_naming_convention.rst
@@ -83,8 +83,8 @@ This prevents from accidentally exporting a symbol, that is not supposed
to be a part of ABI what, in turn, improves both libbpf developer- and
user-experiences.
-ABI versionning
----------------
+ABI versioning
+--------------
To make future ABI extensions possible libbpf ABI is versioned.
Versioning is implemented by ``libbpf.map`` version script that is
@@ -148,7 +148,7 @@ API documentation convention
The libbpf API is documented via comments above definitions in
header files. These comments can be rendered by doxygen and sphinx
for well organized html output. This section describes the
-convention in which these comments should be formated.
+convention in which these comments should be formatted.
Here is an example from btf.h:
diff --git a/Documentation/bpf/map_xskmap.rst b/Documentation/bpf/map_xskmap.rst
index 7093b8208451..dc143edd9233 100644
--- a/Documentation/bpf/map_xskmap.rst
+++ b/Documentation/bpf/map_xskmap.rst
@@ -178,7 +178,7 @@ The following code snippet shows how to update an XSKMAP with an XSK entry.
For an example on how create AF_XDP sockets, please see the AF_XDP-example and
AF_XDP-forwarding programs in the `bpf-examples`_ directory in the `libxdp`_ repository.
-For a detailed explaination of the AF_XDP interface please see:
+For a detailed explanation of the AF_XDP interface please see:
- `libxdp-readme`_.
- `AF_XDP`_ kernel documentation.
diff --git a/Documentation/bpf/ringbuf.rst b/Documentation/bpf/ringbuf.rst
index 6a615cd62bda..a99cd05d79d4 100644
--- a/Documentation/bpf/ringbuf.rst
+++ b/Documentation/bpf/ringbuf.rst
@@ -124,7 +124,7 @@ buffer. Currently 4 are supported:
- ``BPF_RB_AVAIL_DATA`` returns amount of unconsumed data in ring buffer;
- ``BPF_RB_RING_SIZE`` returns the size of ring buffer;
-- ``BPF_RB_CONS_POS``/``BPF_RB_PROD_POS`` returns current logical possition
+- ``BPF_RB_CONS_POS``/``BPF_RB_PROD_POS`` returns current logical position
of consumer/producer, respectively.
Returned values are momentarily snapshots of ring buffer state and could be
@@ -146,7 +146,7 @@ Design and Implementation
This reserve/commit schema allows a natural way for multiple producers, either
on different CPUs or even on the same CPU/in the same BPF program, to reserve
independent records and work with them without blocking other producers. This
-means that if BPF program was interruped by another BPF program sharing the
+means that if BPF program was interrupted by another BPF program sharing the
same ring buffer, they will both get a record reserved (provided there is
enough space left) and can work with it and submit it independently. This
applies to NMI context as well, except that due to using a spinlock during
diff --git a/Documentation/bpf/verifier.rst b/Documentation/bpf/verifier.rst
index d4326caf01f9..f0ec19db301c 100644
--- a/Documentation/bpf/verifier.rst
+++ b/Documentation/bpf/verifier.rst
@@ -192,7 +192,7 @@ checked and found to be non-NULL, all copies can become PTR_TO_MAP_VALUEs.
As well as range-checking, the tracked information is also used for enforcing
alignment of pointer accesses. For instance, on most systems the packet pointer
is 2 bytes after a 4-byte alignment. If a program adds 14 bytes to that to jump
-over the Ethernet header, then reads IHL and addes (IHL * 4), the resulting
+over the Ethernet header, then reads IHL and adds (IHL * 4), the resulting
pointer will have a variable offset known to be 4n+2 for some n, so adding the 2
bytes (NET_IP_ALIGN) gives a 4-byte alignment and so word-sized accesses through
that pointer are safe.
@@ -316,6 +316,301 @@ Pruning considers not only the registers but also the stack (and any spilled
registers it may hold). They must all be safe for the branch to be pruned.
This is implemented in states_equal().
+Some technical details about the state pruning implementation can be found below.
+
+Register liveness tracking
+--------------------------
+
+In order to make state pruning effective, liveness state is tracked for each
+register and stack slot. The basic idea is to track which registers and stack
+slots are actually used during subsequent execution of the program, until
+program exit is reached. Registers and stack slots that were never used could be
+removed from the cached state thus making more states equivalent to a cached
+state. This could be illustrated by the following program::
+
+ 0: call bpf_get_prandom_u32()
+ 1: r1 = 0
+ 2: if r0 == 0 goto +1
+ 3: r0 = 1
+ --- checkpoint ---
+ 4: r0 = r1
+ 5: exit
+
+Suppose that a state cache entry is created at instruction #4 (such entries are
+also called "checkpoints" in the text below). The verifier could reach the
+instruction with one of two possible register states:
+
+* r0 = 1, r1 = 0
+* r0 = 0, r1 = 0
+
+However, only the value of register ``r1`` is important to successfully finish
+verification. The goal of the liveness tracking algorithm is to spot this fact
+and figure out that both states are actually equivalent.
+
+Data structures
+~~~~~~~~~~~~~~~
+
+Liveness is tracked using the following data structures::
+
+ enum bpf_reg_liveness {
+ REG_LIVE_NONE = 0,
+ REG_LIVE_READ32 = 0x1,
+ REG_LIVE_READ64 = 0x2,
+ REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
+ REG_LIVE_WRITTEN = 0x4,
+ REG_LIVE_DONE = 0x8,
+ };
+
+ struct bpf_reg_state {
+ ...
+ struct bpf_reg_state *parent;
+ ...
+ enum bpf_reg_liveness live;
+ ...
+ };
+
+ struct bpf_stack_state {
+ struct bpf_reg_state spilled_ptr;
+ ...
+ };
+
+ struct bpf_func_state {
+ struct bpf_reg_state regs[MAX_BPF_REG];
+ ...
+ struct bpf_stack_state *stack;
+ }
+
+ struct bpf_verifier_state {
+ struct bpf_func_state *frame[MAX_CALL_FRAMES];
+ struct bpf_verifier_state *parent;
+ ...
+ }
+
+* ``REG_LIVE_NONE`` is an initial value assigned to ``->live`` fields upon new
+ verifier state creation;
+
+* ``REG_LIVE_WRITTEN`` means that the value of the register (or stack slot) is
+ defined by some instruction verified between this verifier state's parent and
+ verifier state itself;
+
+* ``REG_LIVE_READ{32,64}`` means that the value of the register (or stack slot)
+  is read by some child state of this verifier state;
+
+* ``REG_LIVE_DONE`` is a marker used by ``clean_verifier_state()`` to avoid
+  processing the same verifier state multiple times and for some sanity checks;
+
+* ``->live`` field values are formed by combining ``enum bpf_reg_liveness``
+ values using bitwise or.
+
+Register parentage chains
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In order to propagate information between parent and child states, a *register
+parentage chain* is established. Each register or stack slot is linked to a
+corresponding register or stack slot in its parent state via a ``->parent``
+pointer. This link is established upon state creation in ``is_state_visited()``
+and might be modified by ``set_callee_state()`` called from
+``__check_func_call()``.
+
+The rules for correspondence between registers / stack slots are as follows:
+
+* For the current stack frame, registers and stack slots of the new state are
+ linked to the registers and stack slots of the parent state with the same
+ indices.
+
+* For the outer stack frames, only callee-saved registers (r6-r9) and stack
+ slots are linked to the registers and stack slots of the parent state with the
+ same indices.
+
+* When a function call is processed, a new ``struct bpf_func_state`` instance
+  is allocated; it encapsulates a new set of registers and stack slots. For this
+  new frame, parent links for r6-r9 and stack slots are set to nil, parent links
+  for r1-r5 are set to match caller r1-r5 parent links.
+
+This could be illustrated by the following diagram (arrows stand for
+``->parent`` pointers)::
+
+ ... ; Frame #0, some instructions
+ --- checkpoint #0 ---
+ 1 : r6 = 42 ; Frame #0
+ --- checkpoint #1 ---
+ 2 : call foo() ; Frame #0
+ ... ; Frame #1, instructions from foo()
+ --- checkpoint #2 ---
+ ... ; Frame #1, instructions from foo()
+ --- checkpoint #3 ---
+ exit ; Frame #1, return from foo()
+ 3 : r1 = r6 ; Frame #0 <- current state
+
+ +-------------------------------+-------------------------------+
+ | Frame #0 | Frame #1 |
+ Checkpoint +-------------------------------+-------------------------------+
+ #0 | r0 | r1-r5 | r6-r9 | fp-8 ... |
+ +-------------------------------+
+ ^ ^ ^ ^
+ | | | |
+ Checkpoint +-------------------------------+
+ #1 | r0 | r1-r5 | r6-r9 | fp-8 ... |
+ +-------------------------------+
+ ^ ^ ^
+ |_______|_______|_______________
+ | | |
+ nil nil | | | nil nil
+ | | | | | | |
+ Checkpoint +-------------------------------+-------------------------------+
+ #2 | r0 | r1-r5 | r6-r9 | fp-8 ... | r0 | r1-r5 | r6-r9 | fp-8 ... |
+ +-------------------------------+-------------------------------+
+ ^ ^ ^ ^ ^
+ nil nil | | | | |
+ | | | | | | |
+ Checkpoint +-------------------------------+-------------------------------+
+ #3 | r0 | r1-r5 | r6-r9 | fp-8 ... | r0 | r1-r5 | r6-r9 | fp-8 ... |
+ +-------------------------------+-------------------------------+
+ ^ ^
+ nil nil | |
+ | | | |
+ Current +-------------------------------+
+ state | r0 | r1-r5 | r6-r9 | fp-8 ... |
+ +-------------------------------+
+ \
+ r6 read mark is propagated via these links
+ all the way up to checkpoint #1.
+ The checkpoint #1 contains a write mark for r6
+ because of instruction (1), thus read propagation
+ does not reach checkpoint #0 (see section below).
+
+Liveness marks tracking
+~~~~~~~~~~~~~~~~~~~~~~~
+
+For each processed instruction, the verifier tracks read and written registers
+and stack slots. The main idea of the algorithm is that read marks propagate
+back along the state parentage chain until they hit a write mark, which 'screens
+off' earlier states from the read. The information about reads is propagated by
+function ``mark_reg_read()`` which could be summarized as follows::
+
+ mark_reg_read(struct bpf_reg_state *state, ...):
+ parent = state->parent
+ while parent:
+ if state->live & REG_LIVE_WRITTEN:
+ break
+ if parent->live & REG_LIVE_READ64:
+ break
+ parent->live |= REG_LIVE_READ64
+ state = parent
+ parent = state->parent
+
+Notes:
+
+* The read marks are applied to the **parent** state while write marks are
+ applied to the **current** state. The write mark on a register or stack slot
+ means that it is updated by some instruction in the straight-line code leading
+ from the parent state to the current state.
+
+* Details about REG_LIVE_READ32 are omitted.
+
+* Function ``propagate_liveness()`` (see section :ref:`read_marks_for_cache_hits`)
+ might override the first parent link. Please refer to the comments in the
+ ``propagate_liveness()`` and ``mark_reg_read()`` source code for further
+ details.
+
+Because stack writes could have different sizes, ``REG_LIVE_WRITTEN`` marks are
+applied conservatively: stack slots are marked as written only if write size
+corresponds to the size of the register, e.g. see function ``save_register_state()``.
+
+Consider the following example::
+
+ 0: (*u64)(r10 - 8) = 0 ; define 8 bytes of fp-8
+ --- checkpoint #0 ---
+ 1: (*u32)(r10 - 8) = 1 ; redefine lower 4 bytes
+ 2: r1 = (*u32)(r10 - 8) ; read lower 4 bytes defined at (1)
+ 3: r2 = (*u32)(r10 - 4) ; read upper 4 bytes defined at (0)
+
+As stated above, the write at (1) does not count as ``REG_LIVE_WRITTEN``. Should
+it be otherwise, the algorithm above wouldn't be able to propagate the read mark
+from (3) to checkpoint #0.
+
+Once the ``BPF_EXIT`` instruction is reached ``update_branch_counts()`` is
+called to update the ``->branches`` counter for each verifier state in a chain
+of parent verifier states. When the ``->branches`` counter reaches zero the
+verifier state becomes a valid entry in a set of cached verifier states.
+
+Each entry of the verifier states cache is post-processed by a function
+``clean_live_states()``. This function marks all registers and stack slots
+without ``REG_LIVE_READ{32,64}`` marks as ``NOT_INIT`` or ``STACK_INVALID``.
+Registers/stack slots marked in this way are ignored in function ``stacksafe()``
+called from ``states_equal()`` when a state cache entry is considered for
+equivalence with a current state.
+
+Now it is possible to explain how the example from the beginning of the section
+works::
+
+ 0: call bpf_get_prandom_u32()
+ 1: r1 = 0
+ 2: if r0 == 0 goto +1
+ 3: r0 = 1
+ --- checkpoint[0] ---
+ 4: r0 = r1
+ 5: exit
+
+* At instruction #2 a branching point is reached and the state ``{ r0 == 0, r1 == 0, pc == 4 }``
+  is pushed to the states processing queue (pc stands for program counter).
+
+* At instruction #4:
+
+ * ``checkpoint[0]`` states cache entry is created: ``{ r0 == 1, r1 == 0, pc == 4 }``;
+ * ``checkpoint[0].r0`` is marked as written;
+ * ``checkpoint[0].r1`` is marked as read;
+
+* At instruction #5 exit is reached and ``checkpoint[0]`` can now be processed
+  by ``clean_live_states()``. After this processing ``checkpoint[0].r1`` has a
+  read mark and all other registers and stack slots are marked as ``NOT_INIT``
+  or ``STACK_INVALID``.
+
+* The state ``{ r0 == 0, r1 == 0, pc == 4 }`` is popped from the states queue
+ and is compared against a cached state ``{ r1 == 0, pc == 4 }``, the states
+ are considered equivalent.
+
+.. _read_marks_for_cache_hits:
+
+Read marks propagation for cache hits
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Another point is the handling of read marks when a previously verified state is
+found in the states cache. Upon a cache hit, the verifier must behave in the
+same way as if the current state had been verified all the way to the program
+exit. This means that all read marks present on registers and stack slots of
+the cached state must be propagated over the parentage chain of the current
+state. The example below shows why this is important. Function
+``propagate_liveness()`` handles this case.
+
+Consider the following state parentage chain (S is a starting state, A-E are
+derived states, -> arrows show which state is derived from which)::
+
+ r1 read
+ <------------- A[r1] == 0
+ C[r1] == 0
+ S ---> A ---> B ---> exit E[r1] == 1
+ |
+ ` ---> C ---> D
+ |
+ ` ---> E ^
+ |___ suppose all these
+ ^ states are at insn #Y
+ |
+ suppose all these
+ states are at insn #X
+
+* Chain of states ``S -> A -> B -> exit`` is verified first.
+
+* While ``B -> exit`` is verified, register ``r1`` is read and this read mark is
+ propagated up to state ``A``.
+
+* When chain of states ``C -> D`` is verified the state ``D`` turns out to be
+ equivalent to state ``B``.
+
+* The read mark for ``r1`` has to be propagated to state ``C``, otherwise state
+ ``C`` might get mistakenly marked as equivalent to state ``E`` even though
+ values for register ``r1`` differ between ``C`` and ``E``.
+
Understanding eBPF verifier messages
====================================
diff --git a/Documentation/conf.py b/Documentation/conf.py
index d927737e3c10..8b4e5451a02d 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -116,6 +116,9 @@ if major >= 3:
# include/linux/linkage.h:
"asmlinkage",
+
+ # include/linux/btf.h
+ "__bpf_kfunc",
]
else:
diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
new file mode 100644
index 000000000000..b4dcdae54ffd
--- /dev/null
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -0,0 +1,100 @@
+name: netdev
+
+doc:
+ netdev configuration over generic netlink.
+
+definitions:
+ -
+ type: flags
+ name: xdp-act
+ entries:
+ -
+ name: basic
+ doc:
+ XDP features set supported by all drivers
+ (XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX)
+ -
+ name: redirect
+ doc:
+ The netdev supports XDP_REDIRECT
+ -
+ name: ndo-xmit
+ doc:
+ This feature informs if netdev implements ndo_xdp_xmit callback.
+ -
+ name: xsk-zerocopy
+ doc:
+ This feature informs if netdev supports AF_XDP in zero copy mode.
+ -
+ name: hw-offload
+ doc:
+ This feature informs if netdev supports XDP hw offloading.
+ -
+ name: rx-sg
+ doc:
+ This feature informs if netdev implements non-linear XDP buffer
+ support in the driver napi callback.
+ -
+ name: ndo-xmit-sg
+ doc:
+ This feature informs if netdev implements non-linear XDP buffer
+ support in ndo_xdp_xmit callback.
+
+attribute-sets:
+ -
+ name: dev
+ attributes:
+ -
+ name: ifindex
+ doc: netdev ifindex
+ type: u32
+ value: 1
+ checks:
+ min: 1
+ -
+ name: pad
+ type: pad
+ -
+ name: xdp-features
+ doc: Bitmask of enabled xdp-features.
+ type: u64
+ enum: xdp-act
+ enum-as-flags: true
+
+operations:
+ list:
+ -
+ name: dev-get
+ doc: Get / dump information about a netdev.
+ value: 1
+ attribute-set: dev
+ do:
+ request:
+ attributes:
+ - ifindex
+ reply: &dev-all
+ attributes:
+ - ifindex
+ - xdp-features
+ dump:
+ reply: *dev-all
+ -
+ name: dev-add-ntf
+ doc: Notification about device appearing.
+ notify: dev-get
+ mcgrp: mgmt
+ -
+ name: dev-del-ntf
+ doc: Notification about device disappearing.
+ notify: dev-get
+ mcgrp: mgmt
+ -
+ name: dev-change-ntf
+ doc: Notification about device configuration being changed.
+ notify: dev-get
+ mcgrp: mgmt
+
+mcast-groups:
+ list:
+ -
+ name: mgmt
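On the driver side, these flag bits map to the ``NETDEV_XDP_ACT_*`` constants
generated into ``include/uapi/linux/netdev.h`` by this spec. As a sketch
mirroring the ice conflict resolution quoted in the merge message
(``example_cfg_netdev()`` is a hypothetical helper, not part of the patch)::

    #include <linux/netdevice.h>

    static void example_cfg_netdev(struct net_device *netdev)
    {
            /* Advertise base XDP verdicts, XDP_REDIRECT and AF_XDP
             * zero-copy support to user space via the new netlink API. */
            netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
                                   NETDEV_XDP_ACT_REDIRECT |
                                   NETDEV_XDP_ACT_XSK_ZEROCOPY;
    }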
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index af35052d06ed..d0846ba818ee 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -30,6 +30,7 @@
#include <asm/facility.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
+#include <asm/text-patching.h>
#include "bpf_jit.h"
struct bpf_jit {
@@ -50,12 +51,13 @@ struct bpf_jit {
int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */
int tail_call_start; /* Tail call start offset */
int excnt; /* Number of exception table entries */
+ int prologue_plt_ret; /* Return address for prologue hotpatch PLT */
+ int prologue_plt; /* Start of prologue hotpatch PLT */
};
#define SEEN_MEM BIT(0) /* use mem[] for temporary storage */
#define SEEN_LITERAL BIT(1) /* code uses literals */
#define SEEN_FUNC BIT(2) /* calls C functions */
-#define SEEN_TAIL_CALL BIT(3) /* code uses tail calls */
#define SEEN_STACK (SEEN_FUNC | SEEN_MEM)
/*
@@ -68,6 +70,10 @@ struct bpf_jit {
#define REG_0 REG_W0 /* Register 0 */
#define REG_1 REG_W1 /* Register 1 */
#define REG_2 BPF_REG_1 /* Register 2 */
+#define REG_3 BPF_REG_2 /* Register 3 */
+#define REG_4 BPF_REG_3 /* Register 4 */
+#define REG_7 BPF_REG_6 /* Register 7 */
+#define REG_8 BPF_REG_7 /* Register 8 */
#define REG_14 BPF_REG_0 /* Register 14 */
/*
@@ -507,20 +513,58 @@ static void bpf_skip(struct bpf_jit *jit, int size)
}
/*
+ * PLT for hotpatchable calls. The calling convention is the same as for the
+ * ftrace hotpatch trampolines: %r0 is return address, %r1 is clobbered.
+ */
+extern const char bpf_plt[];
+extern const char bpf_plt_ret[];
+extern const char bpf_plt_target[];
+extern const char bpf_plt_end[];
+#define BPF_PLT_SIZE 32
+asm(
+ ".pushsection .rodata\n"
+ " .align 8\n"
+ "bpf_plt:\n"
+ " lgrl %r0,bpf_plt_ret\n"
+ " lgrl %r1,bpf_plt_target\n"
+ " br %r1\n"
+ " .align 8\n"
+ "bpf_plt_ret: .quad 0\n"
+ "bpf_plt_target: .quad 0\n"
+ "bpf_plt_end:\n"
+ " .popsection\n"
+);
+
+static void bpf_jit_plt(void *plt, void *ret, void *target)
+{
+ memcpy(plt, bpf_plt, BPF_PLT_SIZE);
+ *(void **)((char *)plt + (bpf_plt_ret - bpf_plt)) = ret;
+ *(void **)((char *)plt + (bpf_plt_target - bpf_plt)) = target;
+}
+
+/*
* Emit function prologue
*
* Save registers and create stack frame if necessary.
- * See stack frame layout desription in "bpf_jit.h"!
+ * See stack frame layout description in "bpf_jit.h"!
*/
-static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
+static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
+ u32 stack_depth)
{
- if (jit->seen & SEEN_TAIL_CALL) {
+ /* No-op for hotpatching */
+ /* brcl 0,prologue_plt */
+ EMIT6_PCREL_RILC(0xc0040000, 0, jit->prologue_plt);
+ jit->prologue_plt_ret = jit->prg;
+
+ if (fp->aux->func_idx == 0) {
+ /* Initialize the tail call counter in the main program. */
/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
_EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
} else {
/*
- * There are no tail calls. Insert nops in order to have
- * tail_call_start at a predictable offset.
+ * Skip the tail call counter initialization in subprograms.
+ * Insert nops in order to have tail_call_start at a
+ * predictable offset.
*/
bpf_skip(jit, 6);
}
@@ -558,6 +602,43 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
}
/*
+ * Emit an expoline for a jump that follows
+ */
+static void emit_expoline(struct bpf_jit *jit)
+{
+ /* exrl %r0,.+10 */
+ EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
+ /* j . */
+ EMIT4_PCREL(0xa7f40000, 0);
+}
+
+/*
+ * Emit __s390_indirect_jump_r1 thunk if necessary
+ */
+static void emit_r1_thunk(struct bpf_jit *jit)
+{
+ if (nospec_uses_trampoline()) {
+ jit->r1_thunk_ip = jit->prg;
+ emit_expoline(jit);
+ /* br %r1 */
+ _EMIT2(0x07f1);
+ }
+}
+
+/*
+ * Call r1 either directly or via __s390_indirect_jump_r1 thunk
+ */
+static void call_r1(struct bpf_jit *jit)
+{
+ if (nospec_uses_trampoline())
+ /* brasl %r14,__s390_indirect_jump_r1 */
+ EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
+ else
+ /* basr %r14,%r1 */
+ EMIT2(0x0d00, REG_14, REG_1);
+}
+
+/*
* Function epilogue
*/
static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
@@ -570,25 +651,20 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
if (nospec_uses_trampoline()) {
jit->r14_thunk_ip = jit->prg;
/* Generate __s390_indirect_jump_r14 thunk */
- /* exrl %r0,.+10 */
- EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
- /* j . */
- EMIT4_PCREL(0xa7f40000, 0);
+ emit_expoline(jit);
}
/* br %r14 */
_EMIT2(0x07fe);
- if ((nospec_uses_trampoline()) &&
- (is_first_pass(jit) || (jit->seen & SEEN_FUNC))) {
- jit->r1_thunk_ip = jit->prg;
- /* Generate __s390_indirect_jump_r1 thunk */
- /* exrl %r0,.+10 */
- EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
- /* j . */
- EMIT4_PCREL(0xa7f40000, 0);
- /* br %r1 */
- _EMIT2(0x07f1);
- }
+ if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
+ emit_r1_thunk(jit);
+
+ jit->prg = ALIGN(jit->prg, 8);
+ jit->prologue_plt = jit->prg;
+ if (jit->prg_buf)
+ bpf_jit_plt(jit->prg_buf + jit->prg,
+ jit->prg_buf + jit->prologue_plt_ret, NULL);
+ jit->prg += BPF_PLT_SIZE;
}
static int get_probe_mem_regno(const u8 *insn)
@@ -663,6 +739,34 @@ static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
}
/*
+ * Sign-extend the register if necessary
+ */
+static int sign_extend(struct bpf_jit *jit, int r, u8 size, u8 flags)
+{
+ if (!(flags & BTF_FMODEL_SIGNED_ARG))
+ return 0;
+
+ switch (size) {
+ case 1:
+ /* lgbr %r,%r */
+ EMIT4(0xb9060000, r, r);
+ return 0;
+ case 2:
+ /* lghr %r,%r */
+ EMIT4(0xb9070000, r, r);
+ return 0;
+ case 4:
+ /* lgfr %r,%r */
+ EMIT4(0xb9140000, r, r);
+ return 0;
+ case 8:
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+/*
* Compile one eBPF instruction into s390x code
*
* NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
@@ -1297,9 +1401,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
*/
case BPF_JMP | BPF_CALL:
{
- u64 func;
+ const struct btf_func_model *m;
bool func_addr_fixed;
- int ret;
+ int j, ret;
+ u64 func;
ret = bpf_jit_get_func_addr(fp, insn, extra_pass,
&func, &func_addr_fixed);
@@ -1308,15 +1413,38 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
REG_SET_SEEN(BPF_REG_5);
jit->seen |= SEEN_FUNC;
+ /*
+ * Copy the tail call counter to where the callee expects it.
+ *
+ * Note 1: The callee can increment the tail call counter, but
+ * we do not load it back, since the x86 JIT does not do this
+ * either.
+ *
+ * Note 2: We assume that the verifier does not let us call the
+ * main program, which clears the tail call counter on entry.
+ */
+ /* mvc STK_OFF_TCCNT(4,%r15),N(%r15) */
+ _EMIT6(0xd203f000 | STK_OFF_TCCNT,
+ 0xf000 | (STK_OFF_TCCNT + STK_OFF + stack_depth));
+
+ /* Sign-extend the kfunc arguments. */
+ if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
+ m = bpf_jit_find_kfunc_model(fp, insn);
+ if (!m)
+ return -1;
+
+ for (j = 0; j < m->nr_args; j++) {
+ if (sign_extend(jit, BPF_REG_1 + j,
+ m->arg_size[j],
+ m->arg_flags[j]))
+ return -1;
+ }
+ }
+
/* lgrl %w1,func */
EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func));
- if (nospec_uses_trampoline()) {
- /* brasl %r14,__s390_indirect_jump_r1 */
- EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
- } else {
- /* basr %r14,%w1 */
- EMIT2(0x0d00, REG_14, REG_W1);
- }
+ /* %r1() */
+ call_r1(jit);
/* lgr %b0,%r2: load return value into %b0 */
EMIT4(0xb9040000, BPF_REG_0, REG_2);
break;
@@ -1329,10 +1457,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
* B1: pointer to ctx
* B2: pointer to bpf_array
* B3: index in bpf_array
- */
- jit->seen |= SEEN_TAIL_CALL;
-
- /*
+ *
* if (index >= array->map.max_entries)
* goto out;
*/
@@ -1393,8 +1518,16 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
/* lg %r1,bpf_func(%r1) */
EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
offsetof(struct bpf_prog, bpf_func));
- /* bc 0xf,tail_call_start(%r1) */
- _EMIT4(0x47f01000 + jit->tail_call_start);
+ if (nospec_uses_trampoline()) {
+ jit->seen |= SEEN_FUNC;
+ /* aghi %r1,tail_call_start */
+ EMIT4_IMM(0xa70b0000, REG_1, jit->tail_call_start);
+ /* brcl 0xf,__s390_indirect_jump_r1 */
+ EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->r1_thunk_ip);
+ } else {
+ /* bc 0xf,tail_call_start(%r1) */
+ _EMIT4(0x47f01000 + jit->tail_call_start);
+ }
/* out: */
if (jit->prg_buf) {
*(u16 *)(jit->prg_buf + patch_1_clrj + 2) =
@@ -1688,7 +1821,7 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
jit->prg = 0;
jit->excnt = 0;
- bpf_jit_prologue(jit, stack_depth);
+ bpf_jit_prologue(jit, fp, stack_depth);
if (bpf_set_addr(jit, 0) < 0)
return -1;
for (i = 0; i < fp->len; i += insn_count) {
@@ -1768,6 +1901,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
struct bpf_jit jit;
int pass;
+ if (WARN_ON_ONCE(bpf_plt_end - bpf_plt != BPF_PLT_SIZE))
+ return orig_fp;
+
if (!fp->jit_requested)
return orig_fp;
@@ -1859,3 +1995,508 @@ out:
tmp : orig_fp);
return fp;
}
+
+bool bpf_jit_supports_kfunc_call(void)
+{
+ return true;
+}
+
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
+ void *old_addr, void *new_addr)
+{
+ struct {
+ u16 opc;
+ s32 disp;
+ } __packed insn;
+ char expected_plt[BPF_PLT_SIZE];
+ char current_plt[BPF_PLT_SIZE];
+ char *plt;
+ int err;
+
+ /* Verify the branch to be patched. */
+ err = copy_from_kernel_nofault(&insn, ip, sizeof(insn));
+ if (err < 0)
+ return err;
+ if (insn.opc != (0xc004 | (old_addr ? 0xf0 : 0)))
+ return -EINVAL;
+
+ if (t == BPF_MOD_JUMP &&
+ insn.disp == ((char *)new_addr - (char *)ip) >> 1) {
+ /*
+ * The branch already points to the destination,
+ * there is no PLT.
+ */
+ } else {
+ /* Verify the PLT. */
+ plt = (char *)ip + (insn.disp << 1);
+ err = copy_from_kernel_nofault(current_plt, plt, BPF_PLT_SIZE);
+ if (err < 0)
+ return err;
+ bpf_jit_plt(expected_plt, (char *)ip + 6, old_addr);
+ if (memcmp(current_plt, expected_plt, BPF_PLT_SIZE))
+ return -EINVAL;
+ /* Adjust the call address. */
+ s390_kernel_write(plt + (bpf_plt_target - bpf_plt),
+ &new_addr, sizeof(void *));
+ }
+
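+	/*
+	 * Background note: "brcl 0,target" is a 6-byte nop, while
+	 * "brcl 0xf,target" branches unconditionally, so enabling or
+	 * disabling the poke spot only requires rewriting the mask
+	 * nibble in byte 1 of the instruction.
+	 */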
+ /* Adjust the mask of the branch. */
+ insn.opc = 0xc004 | (new_addr ? 0xf0 : 0);
+ s390_kernel_write((char *)ip + 1, (char *)&insn.opc + 1, 1);
+
+ /* Make the new code visible to the other CPUs. */
+ text_poke_sync_lock();
+
+ return 0;
+}
+
+struct bpf_tramp_jit {
+ struct bpf_jit common;
+ int orig_stack_args_off;/* Offset of arguments placed on stack by the
+ * func_addr's original caller
+ */
+ int stack_size; /* Trampoline stack size */
+ int stack_args_off; /* Offset of stack arguments for calling
+ * func_addr, has to be at the top
+ */
+ int reg_args_off; /* Offset of register arguments for calling
+ * func_addr
+ */
+ int ip_off; /* For bpf_get_func_ip(), has to be at
+ * (ctx - 16)
+ */
+ int arg_cnt_off; /* For bpf_get_func_arg_cnt(), has to be at
+ * (ctx - 8)
+ */
+ int bpf_args_off; /* Offset of BPF_PROG context, which consists
+ * of BPF arguments followed by return value
+ */
+ int retval_off; /* Offset of return value (see above) */
+ int r7_r8_off; /* Offset of saved %r7 and %r8, which are used
+ * for __bpf_prog_enter() return value and
+ * func_addr respectively
+ */
+ int r14_off; /* Offset of saved %r14 */
+ int run_ctx_off; /* Offset of struct bpf_tramp_run_ctx */
+ int do_fexit; /* do_fexit: label */
+};
+
+static void load_imm64(struct bpf_jit *jit, int dst_reg, u64 val)
+{
+ /* llihf %dst_reg,val_hi */
+ EMIT6_IMM(0xc00e0000, dst_reg, (val >> 32));
+	/* oilf %dst_reg,val_lo */
+ EMIT6_IMM(0xc00d0000, dst_reg, val);
+}
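+
+/*
+ * E.g. load_imm64(jit, REG_2, 0x123456789abcdef0ULL) emits
+ * "llihf %r2,0x12345678" (sets the upper 32 bits, zeroes the rest)
+ * followed by "oilf %r2,0x9abcdef0" (ORs in the lower 32 bits).
+ */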
+
+static int invoke_bpf_prog(struct bpf_tramp_jit *tjit,
+ const struct btf_func_model *m,
+ struct bpf_tramp_link *tlink, bool save_ret)
+{
+ struct bpf_jit *jit = &tjit->common;
+ int cookie_off = tjit->run_ctx_off +
+ offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
+ struct bpf_prog *p = tlink->link.prog;
+ int patch;
+
+ /*
+ * run_ctx.cookie = tlink->cookie;
+ */
+
+ /* %r0 = tlink->cookie */
+ load_imm64(jit, REG_W0, tlink->cookie);
+ /* stg %r0,cookie_off(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, REG_0, REG_15, cookie_off);
+
+ /*
+ * if ((start = __bpf_prog_enter(p, &run_ctx)) == 0)
+ * goto skip;
+ */
+
+ /* %r1 = __bpf_prog_enter */
+ load_imm64(jit, REG_1, (u64)bpf_trampoline_enter(p));
+ /* %r2 = p */
+ load_imm64(jit, REG_2, (u64)p);
+ /* la %r3,run_ctx_off(%r15) */
+ EMIT4_DISP(0x41000000, REG_3, REG_15, tjit->run_ctx_off);
+ /* %r1() */
+ call_r1(jit);
+ /* ltgr %r7,%r2 */
+ EMIT4(0xb9020000, REG_7, REG_2);
+ /* brcl 8,skip */
+ patch = jit->prg;
+ EMIT6_PCREL_RILC(0xc0040000, 8, 0);
+
+ /*
+ * retval = bpf_func(args, p->insnsi);
+ */
+
+ /* %r1 = p->bpf_func */
+ load_imm64(jit, REG_1, (u64)p->bpf_func);
+ /* la %r2,bpf_args_off(%r15) */
+ EMIT4_DISP(0x41000000, REG_2, REG_15, tjit->bpf_args_off);
+ /* %r3 = p->insnsi */
+ if (!p->jited)
+ load_imm64(jit, REG_3, (u64)p->insnsi);
+ /* %r1() */
+ call_r1(jit);
+ /* stg %r2,retval_off(%r15) */
+ if (save_ret) {
+ if (sign_extend(jit, REG_2, m->ret_size, m->ret_flags))
+ return -1;
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15,
+ tjit->retval_off);
+ }
+
+ /* skip: */
+ if (jit->prg_buf)
+ *(u32 *)&jit->prg_buf[patch + 2] = (jit->prg - patch) >> 1;
+
+ /*
+ * __bpf_prog_exit(p, start, &run_ctx);
+ */
+
+ /* %r1 = __bpf_prog_exit */
+ load_imm64(jit, REG_1, (u64)bpf_trampoline_exit(p));
+ /* %r2 = p */
+ load_imm64(jit, REG_2, (u64)p);
+ /* lgr %r3,%r7 */
+ EMIT4(0xb9040000, REG_3, REG_7);
+ /* la %r4,run_ctx_off(%r15) */
+ EMIT4_DISP(0x41000000, REG_4, REG_15, tjit->run_ctx_off);
+ /* %r1() */
+ call_r1(jit);
+
+ return 0;
+}
+
+static int alloc_stack(struct bpf_tramp_jit *tjit, size_t size)
+{
+ int stack_offset = tjit->stack_size;
+
+ tjit->stack_size += size;
+ return stack_offset;
+}
+
+/* ABI uses %r2 - %r6 for parameter passing. */
+#define MAX_NR_REG_ARGS 5
+
+/* The "L" field of the "mvc" instruction is 8 bits. */
+#define MAX_MVC_SIZE 256
+#define MAX_NR_STACK_ARGS (MAX_MVC_SIZE / sizeof(u64))
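+/* With MAX_MVC_SIZE = 256 and sizeof(u64) = 8 this allows 32 stack args. */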
+
+/* -mfentry generates a 6-byte nop on s390x. */
+#define S390X_PATCH_SIZE 6
+
+static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ struct bpf_tramp_jit *tjit,
+ const struct btf_func_model *m,
+ u32 flags,
+ struct bpf_tramp_links *tlinks,
+ void *func_addr)
+{
+ struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+ struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
+ struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
+ int nr_bpf_args, nr_reg_args, nr_stack_args;
+ struct bpf_jit *jit = &tjit->common;
+ int arg, bpf_arg_off;
+ int i, j;
+
+	/* Support as many stack arguments as the "mvc" instruction can handle. */
+ nr_reg_args = min_t(int, m->nr_args, MAX_NR_REG_ARGS);
+ nr_stack_args = m->nr_args - nr_reg_args;
+ if (nr_stack_args > MAX_NR_STACK_ARGS)
+ return -ENOTSUPP;
+
+ /* Return to %r14, since func_addr and %r0 are not available. */
+ if (!func_addr && !(flags & BPF_TRAMP_F_ORIG_STACK))
+ flags |= BPF_TRAMP_F_SKIP_FRAME;
+
+ /*
+ * Compute how many arguments we need to pass to BPF programs.
+	 * The BPF ABI mirrors that of x86_64: arguments that are 16 bytes or
+	 * smaller are packed into 1 or 2 registers; larger arguments are
+	 * passed via pointers.
+	 * In the s390x ABI, arguments that are 8 bytes or smaller are packed
+	 * into a register; larger arguments are passed via pointers.
+ * We need to deal with this difference.
+ */
+ nr_bpf_args = 0;
+ for (i = 0; i < m->nr_args; i++) {
+ if (m->arg_size[i] <= 8)
+ nr_bpf_args += 1;
+ else if (m->arg_size[i] <= 16)
+ nr_bpf_args += 2;
+ else
+ return -ENOTSUPP;
+ }
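+	/*
+	 * E.g. for a hypothetical func(u32 a, u64 b, struct { u64 x, y; } c)
+	 * the sizes are {4, 8, 16}, so nr_bpf_args = 1 + 1 + 2 = 4.
+	 */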
+
+ /*
+ * Calculate the stack layout.
+ */
+
+ /* Reserve STACK_FRAME_OVERHEAD bytes for the callees. */
+ tjit->stack_size = STACK_FRAME_OVERHEAD;
+ tjit->stack_args_off = alloc_stack(tjit, nr_stack_args * sizeof(u64));
+ tjit->reg_args_off = alloc_stack(tjit, nr_reg_args * sizeof(u64));
+ tjit->ip_off = alloc_stack(tjit, sizeof(u64));
+ tjit->arg_cnt_off = alloc_stack(tjit, sizeof(u64));
+ tjit->bpf_args_off = alloc_stack(tjit, nr_bpf_args * sizeof(u64));
+ tjit->retval_off = alloc_stack(tjit, sizeof(u64));
+ tjit->r7_r8_off = alloc_stack(tjit, 2 * sizeof(u64));
+ tjit->r14_off = alloc_stack(tjit, sizeof(u64));
+ tjit->run_ctx_off = alloc_stack(tjit,
+ sizeof(struct bpf_tramp_run_ctx));
+ /* The caller has already reserved STACK_FRAME_OVERHEAD bytes. */
+ tjit->stack_size -= STACK_FRAME_OVERHEAD;
+ tjit->orig_stack_args_off = tjit->stack_size + STACK_FRAME_OVERHEAD;
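+	/*
+	 * Rough sketch of the resulting layout (summarizing the allocations
+	 * above; offsets relative to the new %r15, highest first):
+	 *
+	 * orig_stack_args_off: stack args of func_addr's original caller
+	 * run_ctx_off:         struct bpf_tramp_run_ctx
+	 * r14_off:             saved %r14
+	 * r7_r8_off:           saved %r7 and %r8
+	 * retval_off:          return value
+	 * bpf_args_off:        BPF program context (args, then retval)
+	 * arg_cnt_off:         argument count, at (ctx - 8)
+	 * ip_off:              func_addr, at (ctx - 16)
+	 * reg_args_off:        register args for calling func_addr
+	 * stack_args_off:      stack args for calling func_addr
+	 * 0:                   STACK_FRAME_OVERHEAD for callees
+	 */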
+
+ /* aghi %r15,-stack_size */
+ EMIT4_IMM(0xa70b0000, REG_15, -tjit->stack_size);
+	/* stmg %r2,%rN,reg_args_off(%r15) */
+ if (nr_reg_args)
+ EMIT6_DISP_LH(0xeb000000, 0x0024, REG_2,
+ REG_2 + (nr_reg_args - 1), REG_15,
+ tjit->reg_args_off);
+ for (i = 0, j = 0; i < m->nr_args; i++) {
+ if (i < MAX_NR_REG_ARGS)
+ arg = REG_2 + i;
+ else
+ arg = tjit->orig_stack_args_off +
+ (i - MAX_NR_REG_ARGS) * sizeof(u64);
+ bpf_arg_off = tjit->bpf_args_off + j * sizeof(u64);
+ if (m->arg_size[i] <= 8) {
+ if (i < MAX_NR_REG_ARGS)
+ /* stg %arg,bpf_arg_off(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, arg,
+ REG_0, REG_15, bpf_arg_off);
+ else
+ /* mvc bpf_arg_off(8,%r15),arg(%r15) */
+ _EMIT6(0xd207f000 | bpf_arg_off,
+ 0xf000 | arg);
+ j += 1;
+ } else {
+ if (i < MAX_NR_REG_ARGS) {
+ /* mvc bpf_arg_off(16,%r15),0(%arg) */
+ _EMIT6(0xd20ff000 | bpf_arg_off,
+ reg2hex[arg] << 12);
+ } else {
+ /* lg %r1,arg(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_0,
+ REG_15, arg);
+ /* mvc bpf_arg_off(16,%r15),0(%r1) */
+ _EMIT6(0xd20ff000 | bpf_arg_off, 0x1000);
+ }
+ j += 2;
+ }
+ }
+ /* stmg %r7,%r8,r7_r8_off(%r15) */
+ EMIT6_DISP_LH(0xeb000000, 0x0024, REG_7, REG_8, REG_15,
+ tjit->r7_r8_off);
+ /* stg %r14,r14_off(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_14, REG_0, REG_15, tjit->r14_off);
+
+ if (flags & BPF_TRAMP_F_ORIG_STACK) {
+ /*
+ * The ftrace trampoline puts the return address (which is the
+ * address of the original function + S390X_PATCH_SIZE) into
+ * %r0; see ftrace_shared_hotpatch_trampoline_br and
+ * ftrace_init_nop() for details.
+ */
+
+ /* lgr %r8,%r0 */
+ EMIT4(0xb9040000, REG_8, REG_0);
+ } else {
+ /* %r8 = func_addr + S390X_PATCH_SIZE */
+ load_imm64(jit, REG_8, (u64)func_addr + S390X_PATCH_SIZE);
+ }
+
+ /*
+ * ip = func_addr;
+ * arg_cnt = m->nr_args;
+ */
+
+ if (flags & BPF_TRAMP_F_IP_ARG) {
+ /* %r0 = func_addr */
+ load_imm64(jit, REG_0, (u64)func_addr);
+ /* stg %r0,ip_off(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_0, REG_0, REG_15,
+ tjit->ip_off);
+ }
+ /* lghi %r0,nr_bpf_args */
+ EMIT4_IMM(0xa7090000, REG_0, nr_bpf_args);
+ /* stg %r0,arg_cnt_off(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_0, REG_0, REG_15,
+ tjit->arg_cnt_off);
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ /*
+ * __bpf_tramp_enter(im);
+ */
+
+ /* %r1 = __bpf_tramp_enter */
+ load_imm64(jit, REG_1, (u64)__bpf_tramp_enter);
+ /* %r2 = im */
+ load_imm64(jit, REG_2, (u64)im);
+ /* %r1() */
+ call_r1(jit);
+ }
+
+ for (i = 0; i < fentry->nr_links; i++)
+ if (invoke_bpf_prog(tjit, m, fentry->links[i],
+ flags & BPF_TRAMP_F_RET_FENTRY_RET))
+ return -EINVAL;
+
+ if (fmod_ret->nr_links) {
+ /*
+ * retval = 0;
+ */
+
+ /* xc retval_off(8,%r15),retval_off(%r15) */
+ _EMIT6(0xd707f000 | tjit->retval_off,
+ 0xf000 | tjit->retval_off);
+
+ for (i = 0; i < fmod_ret->nr_links; i++) {
+ if (invoke_bpf_prog(tjit, m, fmod_ret->links[i], true))
+ return -EINVAL;
+
+ /*
+ * if (retval)
+ * goto do_fexit;
+ */
+
+ /* ltg %r0,retval_off(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0002, REG_0, REG_0, REG_15,
+ tjit->retval_off);
+ /* brcl 7,do_fexit */
+ EMIT6_PCREL_RILC(0xc0040000, 7, tjit->do_fexit);
+ }
+ }
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ /*
+ * retval = func_addr(args);
+ */
+
+ /* lmg %r2,%rN,reg_args_off(%r15) */
+ if (nr_reg_args)
+ EMIT6_DISP_LH(0xeb000000, 0x0004, REG_2,
+ REG_2 + (nr_reg_args - 1), REG_15,
+ tjit->reg_args_off);
+ /* mvc stack_args_off(N,%r15),orig_stack_args_off(%r15) */
+ if (nr_stack_args)
+ _EMIT6(0xd200f000 |
+ (nr_stack_args * sizeof(u64) - 1) << 16 |
+ tjit->stack_args_off,
+ 0xf000 | tjit->orig_stack_args_off);
+ /* lgr %r1,%r8 */
+ EMIT4(0xb9040000, REG_1, REG_8);
+ /* %r1() */
+ call_r1(jit);
+ /* stg %r2,retval_off(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15,
+ tjit->retval_off);
+
+ im->ip_after_call = jit->prg_buf + jit->prg;
+
+ /*
+ * The following nop will be patched by bpf_tramp_image_put().
+ */
+
+ /* brcl 0,im->ip_epilogue */
+ EMIT6_PCREL_RILC(0xc0040000, 0, (u64)im->ip_epilogue);
+ }
+
+ /* do_fexit: */
+ tjit->do_fexit = jit->prg;
+ for (i = 0; i < fexit->nr_links; i++)
+ if (invoke_bpf_prog(tjit, m, fexit->links[i], false))
+ return -EINVAL;
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ im->ip_epilogue = jit->prg_buf + jit->prg;
+
+ /*
+ * __bpf_tramp_exit(im);
+ */
+
+ /* %r1 = __bpf_tramp_exit */
+ load_imm64(jit, REG_1, (u64)__bpf_tramp_exit);
+ /* %r2 = im */
+ load_imm64(jit, REG_2, (u64)im);
+ /* %r1() */
+ call_r1(jit);
+ }
+
+ /* lmg %r2,%rN,reg_args_off(%r15) */
+ if ((flags & BPF_TRAMP_F_RESTORE_REGS) && nr_reg_args)
+ EMIT6_DISP_LH(0xeb000000, 0x0004, REG_2,
+ REG_2 + (nr_reg_args - 1), REG_15,
+ tjit->reg_args_off);
+ /* lgr %r1,%r8 */
+ if (!(flags & BPF_TRAMP_F_SKIP_FRAME))
+ EMIT4(0xb9040000, REG_1, REG_8);
+ /* lmg %r7,%r8,r7_r8_off(%r15) */
+ EMIT6_DISP_LH(0xeb000000, 0x0004, REG_7, REG_8, REG_15,
+ tjit->r7_r8_off);
+ /* lg %r14,r14_off(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_14, REG_0, REG_15, tjit->r14_off);
+ /* lg %r2,retval_off(%r15) */
+ if (flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET))
+ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_2, REG_0, REG_15,
+ tjit->retval_off);
+ /* aghi %r15,stack_size */
+ EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size);
+ /* Emit an expoline for the following indirect jump. */
+ if (nospec_uses_trampoline())
+ emit_expoline(jit);
+ if (flags & BPF_TRAMP_F_SKIP_FRAME)
+ /* br %r14 */
+ _EMIT2(0x07fe);
+ else
+ /* br %r1 */
+ _EMIT2(0x07f1);
+
+ emit_r1_thunk(jit);
+
+ return 0;
+}
+
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
+ void *image_end, const struct btf_func_model *m,
+ u32 flags, struct bpf_tramp_links *tlinks,
+ void *func_addr)
+{
+ struct bpf_tramp_jit tjit;
+ int ret;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0) {
+ /* Compute offsets, check whether the code fits. */
+ memset(&tjit, 0, sizeof(tjit));
+ } else {
+ /* Generate the code. */
+ tjit.common.prg = 0;
+ tjit.common.prg_buf = image;
+ }
+ ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,
+ tlinks, func_addr);
+ if (ret < 0)
+ return ret;
+ if (tjit.common.prg > (char *)image_end - (char *)image)
+ /*
+ * Use the same error code as for exceeding
+ * BPF_MAX_TRAMP_LINKS.
+ */
+ return -E2BIG;
+ }
+
+ return ret;
+}
+
+bool bpf_jit_supports_subprog_tailcalls(void)
+{
+ return true;
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index e8ad5ea31aff..d3999db7c6a2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -597,7 +597,9 @@ static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
if (rc)
return rc;
}
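+		/*
+		 * Advertise the device as an XDP_REDIRECT target now that
+		 * XDP Tx queues exist; the second argument tells whether S/G
+		 * frames are accepted by ndo_xdp_xmit (not the case for ena).
+		 */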
+ xdp_features_set_redirect_target(netdev, false);
} else if (old_bpf_prog) {
+ xdp_features_clear_redirect_target(netdev);
rc = ena_destroy_and_free_all_xdp_queues(adapter);
if (rc)
return rc;
@@ -4103,6 +4105,8 @@ static void ena_set_conf_feat_params(struct ena_adapter *adapter,
/* Set offload features */
ena_set_dev_offloads(feat, netdev);
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+
adapter->max_mtu = feat->dev_attr.max_mtu;
netdev->max_mtu = adapter->max_mtu;
netdev->min_mtu = ENA_MIN_MTU;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 06508eebb585..d6d6d5d37ff3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -384,6 +384,11 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
+ self->ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
}
void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index fd6b5862f74e..ab5dfb3a4081 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -13687,6 +13687,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_set_tso_max_size(dev, GSO_MAX_SIZE);
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG;
+
#ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 36d5202c0aee..5843c93b1711 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -422,9 +422,11 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
if (prog) {
bnxt_set_rx_skb_mode(bp, true);
+ xdp_features_set_redirect_target(dev, true);
} else {
int rx, tx;
+ xdp_features_clear_redirect_target(dev);
bnxt_set_rx_skb_mode(bp, false);
bnxt_get_max_rings(bp, &rx, &tx, true);
if (rx > 1) {
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index f2f95493ec89..8b25313c7f6b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -2218,6 +2218,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &nicvf_netdev_ops;
netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
+
/* MTU range: 64 - 9200 */
netdev->min_mtu = NIC_HW_MIN_FRS;
netdev->max_mtu = NIC_HW_MAX_FRS;
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index c3cf427a9409..6982aaa928b5 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -1926,6 +1926,10 @@ static int tsnep_probe(struct platform_device *pdev)
netdev->features = NETIF_F_SG;
netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 027fff9f7db0..9318a2554056 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -244,6 +244,10 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->features |= net_dev->hw_features;
net_dev->vlan_features = net_dev->features;
+ net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
if (is_valid_ether_addr(mac_addr)) {
memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
eth_hw_addr_set(net_dev, mac_addr);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 2e79d18fc3c7..746ccfde7255 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4596,6 +4596,10 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO;
net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
net_dev->hw_features = net_dev->features;
+ net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_NDO_XMIT;
if (priv->dpni_attrs.vlan_filter_entries)
net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 97056dc3496d..7cd22d370caa 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -807,6 +807,9 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features |= NETIF_F_RXHASH;
ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) {
priv->active_offloads |= ENETC_F_QCI;
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c
index b4cce30e526a..df86770731ad 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_main.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c
@@ -1160,6 +1160,11 @@ static int fun_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)
WRITE_ONCE(rxqs[i]->xdp_prog, prog);
}
+ if (prog)
+ xdp_features_set_redirect_target(dev, true);
+ else
+ xdp_features_clear_redirect_target(dev);
+
dev->max_mtu = prog ? XDP_MAX_MTU : FUN_MAX_MTU;
old_prog = xchg(&fp->xdp_prog, prog);
if (old_prog)
@@ -1765,6 +1770,7 @@ static int fun_create_netdev(struct fun_ethdev *ed, unsigned int portid)
netdev->vlan_features = netdev->features & VLAN_FEAT;
netdev->mpls_features = netdev->vlan_features;
netdev->hw_enc_features = netdev->hw_features;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = FUN_MAX_MTU;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 43693f902c27..3ee00c3bc319 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -13339,9 +13339,11 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
old_prog = xchg(&vsi->xdp_prog, prog);
if (need_reset) {
- if (!prog)
+ if (!prog) {
+ xdp_features_clear_redirect_target(vsi->netdev);
/* Wait until ndo_xsk_wakeup completes. */
synchronize_rcu();
+ }
i40e_reset_and_rebuild(pf, true, true);
}
@@ -13362,11 +13364,13 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
/* Kick start the NAPI context if there is an AF_XDP socket open
* on that queue id. This so that receiving will start.
*/
- if (need_reset && prog)
+ if (need_reset && prog) {
for (i = 0; i < vsi->num_queue_pairs; i++)
if (vsi->xdp_rings[i]->xsk_pool)
(void)i40e_xsk_wakeup(vsi->netdev, i,
XDP_WAKEUP_RX);
+ xdp_features_set_redirect_target(vsi->netdev, true);
+ }
return 0;
}
@@ -13783,6 +13787,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
netdev->features &= ~NETIF_F_HW_TC;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 554095b25f44..1911d644dfa8 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -355,9 +355,6 @@ static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
{
if (ice_ring_uses_build_skb(rx_ring))
return ICE_SKB_PAD;
- else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
- return XDP_PACKET_HEADROOM;
-
return 0;
}
@@ -495,7 +492,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
struct device *dev = ice_pf_to_dev(ring->vsi->back);
- u16 num_bufs = ICE_DESC_UNUSED(ring);
+ u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
int err;
ring->rx_buf_len = ring->vsi->rx_buf_len;
@@ -503,8 +500,10 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
if (ring->vsi->type == ICE_VSI_PF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
- xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
- ring->q_index, ring->q_vector->napi.napi_id);
+ __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->vsi->rx_buf_len);
ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) {
@@ -524,9 +523,11 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
} else {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
- xdp_rxq_info_reg(&ring->xdp_rxq,
- ring->netdev,
- ring->q_index, ring->q_vector->napi.napi_id);
+ __xdp_rxq_info_reg(&ring->xdp_rxq,
+ ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->vsi->rx_buf_len);
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED,
@@ -536,6 +537,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
}
}
+ xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq);
+ ring->xdp.data = NULL;
err = ice_setup_rx_ctx(ring);
if (err) {
dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 6f03e9fe331a..b360bd8f1599 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3046,8 +3046,6 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
/* clone ring and setup updated count */
xdp_rings[i] = *vsi->xdp_rings[i];
xdp_rings[i].count = new_tx_cnt;
- xdp_rings[i].next_dd = ICE_RING_QUARTER(&xdp_rings[i]) - 1;
- xdp_rings[i].next_rs = ICE_RING_QUARTER(&xdp_rings[i]) - 1;
xdp_rings[i].desc = NULL;
xdp_rings[i].tx_buf = NULL;
err = ice_setup_tx_ring(&xdp_rings[i]);
@@ -3092,7 +3090,7 @@ process_rx:
/* allocate Rx buffers */
err = ice_alloc_rx_bufs(&rx_rings[i],
- ICE_DESC_UNUSED(&rx_rings[i]));
+ ICE_RX_DESC_UNUSED(&rx_rings[i]));
rx_unwind:
if (err) {
while (i) {
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 960197b2301c..37fe639712e6 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1961,8 +1961,8 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
{
if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
- vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
- vsi->rx_buf_len = ICE_RXBUF_2048;
+ vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+ vsi->rx_buf_len = ICE_RXBUF_1664;
#if (PAGE_SIZE < 8192)
} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
(vsi->netdev->mtu <= ETH_DATA_LEN)) {
@@ -1971,11 +1971,7 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
#endif
} else {
vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
-#if (PAGE_SIZE < 8192)
vsi->rx_buf_len = ICE_RXBUF_3072;
-#else
- vsi->rx_buf_len = ICE_RXBUF_2048;
-#endif
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index e6a35f875520..0712c1055aea 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -22,6 +22,7 @@
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"
+#include <net/xdp_sock_drv.h>
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -2569,8 +2570,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
xdp_ring->netdev = NULL;
xdp_ring->dev = dev;
xdp_ring->count = vsi->num_tx_desc;
- xdp_ring->next_dd = ICE_RING_QUARTER(xdp_ring) - 1;
- xdp_ring->next_rs = ICE_RING_QUARTER(xdp_ring) - 1;
WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
@@ -2862,6 +2861,18 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
}
/**
+ * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @vsi: Pointer to VSI structure
+ */
+static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
+{
+ if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
+ return ICE_RXBUF_1664;
+ else
+ return ICE_RXBUF_3072;
+}
+
+/**
* ice_xdp_setup_prog - Add or remove XDP eBPF program
* @vsi: VSI to setup XDP for
* @prog: XDP program
@@ -2871,13 +2882,16 @@ static int
ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
- int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
+ unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
bool if_running = netif_running(vsi->netdev);
int ret = 0, xdp_ring_err = 0;
- if (frame_size > vsi->rx_buf_len) {
- NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
- return -EOPNOTSUPP;
+ if (prog && !prog->aux->xdp_has_frags) {
+ if (frame_size > ice_max_xdp_frame_size(vsi)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "MTU is too large for linear frames and XDP prog does not support frags");
+ return -EOPNOTSUPP;
+ }
}
/* need to stop netdev while setting up the program for Rx rings */
@@ -2898,11 +2912,13 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
}
+ xdp_features_set_redirect_target(vsi->netdev, false);
/* reallocate Rx queues that are used for zero-copy */
xdp_ring_err = ice_realloc_zc_buf(vsi, true);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
+ xdp_features_clear_redirect_target(vsi->netdev);
xdp_ring_err = ice_destroy_xdp_rings(vsi);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
@@ -4552,6 +4568,8 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
np->vsi = vsi;
ice_set_netdev_features(netdev);
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
ice_set_ops(netdev);
if (vsi->type == ICE_VSI_PF) {
@@ -7514,18 +7532,6 @@ clear_recovery:
}
/**
- * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
- * @vsi: Pointer to VSI structure
- */
-static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
-{
- if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
- return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
- else
- return ICE_RXBUF_3072;
-}
-
-/**
* ice_change_mtu - NDO callback to change the MTU
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -7537,6 +7543,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
+ struct bpf_prog *prog;
u8 count = 0;
int err = 0;
@@ -7545,7 +7552,8 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
- if (ice_is_xdp_ena_vsi(vsi)) {
+ prog = vsi->xdp_prog;
+ if (prog && !prog->aux->xdp_has_frags) {
int frame_size = ice_max_xdp_frame_size(vsi);
if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
@@ -7553,6 +7561,12 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
frame_size - ICE_ETH_PKT_HDR_PAD);
return -EINVAL;
}
+ } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
+ if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
+ netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
+ ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
+ return -EINVAL;
+ }
}
/* if a reset is in progress, wait for some time for it to complete */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index ccf09c957a1c..466113c86e6f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -113,12 +113,16 @@ static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
if (tx_buf->skb) {
- if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
+ if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) {
devm_kfree(ring->dev, tx_buf->raw_buf);
- else if (ice_ring_is_xdp(ring))
- page_frag_free(tx_buf->raw_buf);
- else
+ } else if (ice_ring_is_xdp(ring)) {
+ if (ring->xsk_pool)
+ xsk_buff_free(tx_buf->xdp);
+ else
+ page_frag_free(tx_buf->raw_buf);
+ } else {
dev_kfree_skb_any(tx_buf->skb);
+ }
if (dma_unmap_len(tx_buf, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buf, dma),
@@ -174,8 +178,6 @@ tx_skip_free:
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->next_dd = ICE_RING_QUARTER(tx_ring) - 1;
- tx_ring->next_rs = ICE_RING_QUARTER(tx_ring) - 1;
if (!tx_ring->netdev)
return;
@@ -382,6 +384,7 @@ err:
*/
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
+ struct xdp_buff *xdp = &rx_ring->xdp;
struct device *dev = rx_ring->dev;
u32 size;
u16 i;
@@ -390,16 +393,16 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
if (!rx_ring->rx_buf)
return;
- if (rx_ring->skb) {
- dev_kfree_skb(rx_ring->skb);
- rx_ring->skb = NULL;
- }
-
if (rx_ring->xsk_pool) {
ice_xsk_clean_rx_ring(rx_ring);
goto rx_skip_free;
}
+ if (xdp->data) {
+ xdp_return_buff(xdp);
+ xdp->data = NULL;
+ }
+
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
@@ -437,6 +440,7 @@ rx_skip_free:
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
+ rx_ring->first_desc = 0;
rx_ring->next_to_use = 0;
}
@@ -506,6 +510,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
rx_ring->next_to_use = 0;
rx_ring->next_to_clean = 0;
+ rx_ring->first_desc = 0;
if (ice_is_xdp_ena_vsi(rx_ring->vsi))
WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
@@ -523,8 +528,16 @@ err:
return -ENOMEM;
}
+/**
+ * ice_rx_frame_truesize - calculate the truesize of an Rx frame
+ * @rx_ring: ptr to Rx ring
+ * @size: size of the frame
+ *
+ * Calculate the truesize, taking into account the PAGE_SIZE of the
+ * underlying arch
+ */
static unsigned int
-ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
+ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
{
unsigned int truesize;
@@ -545,34 +558,39 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused s
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
+ * @rx_buf: Rx buffer to store the XDP action
*
* Stores the resulting verdict (one of ICE_XDP_{PASS, CONSUMED, TX, REDIR})
* in rx_buf->act
*/
-static int
+static void
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
+ struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
+ struct ice_rx_buf *rx_buf)
{
- int err;
+ unsigned int ret = ICE_XDP_PASS;
u32 act;
+ if (!xdp_prog)
+ goto exit;
+
act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
case XDP_PASS:
- return ICE_XDP_PASS;
+ break;
case XDP_TX:
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_lock(&xdp_ring->tx_lock);
- err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
+ ret = __ice_xmit_xdp_ring(xdp, xdp_ring);
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_unlock(&xdp_ring->tx_lock);
- if (err == ICE_XDP_CONSUMED)
+ if (ret == ICE_XDP_CONSUMED)
goto out_failure;
- return err;
+ break;
case XDP_REDIRECT:
- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- if (err)
+ if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
goto out_failure;
- return ICE_XDP_REDIR;
+ ret = ICE_XDP_REDIR;
+ break;
default:
bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
@@ -581,8 +599,12 @@ out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- return ICE_XDP_CONSUMED;
+ ret = ICE_XDP_CONSUMED;
}
+exit:
+ rx_buf->act = ret;
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ ice_set_rx_bufs_act(xdp, rx_ring, ret);
}
/**
@@ -605,6 +627,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
unsigned int queue_index = smp_processor_id();
struct ice_vsi *vsi = np->vsi;
struct ice_tx_ring *xdp_ring;
+ struct ice_tx_buf *tx_buf;
int nxmit = 0, i;
if (test_bit(ICE_VSI_DOWN, vsi->state))
@@ -627,16 +650,18 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
xdp_ring = vsi->xdp_rings[queue_index];
}
+ tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
- err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
+ err = ice_xmit_xdp_ring(xdpf, xdp_ring);
if (err != ICE_XDP_TX)
break;
nxmit++;
}
+ tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
if (unlikely(flags & XDP_XMIT_FLUSH))
ice_xdp_ring_update_tail(xdp_ring);
@@ -706,7 +731,7 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
* buffers. Then bump tail at most one time. Grouping like this lets us avoid
* multiple tail writes per call.
*/
-bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
+bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
@@ -783,7 +808,6 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
/**
* ice_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buf: buffer containing the page
- * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
*
* If page is reusable, we have a green light for calling ice_reuse_rx_page,
* which will assign the current buffer to the buffer that next_to_alloc is
@@ -791,7 +815,7 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
* page freed
*/
static bool
-ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
+ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
@@ -802,7 +826,7 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
+ if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
return false;
#else
#define ICE_LAST_OFFSET \
@@ -824,33 +848,44 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
}
/**
- * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
+ * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag
* @rx_ring: Rx descriptor ring to transact packets on
+ * @xdp: xdp buff to place the data into
* @rx_buf: buffer containing page to add
- * @skb: sk_buff to place the data into
* @size: packet length from rx_desc
*
- * This function will add the data contained in rx_buf->page to the skb.
- * It will just attach the page as a frag to the skb.
- * The function will then update the page offset.
+ * This function will add the data contained in rx_buf->page to the xdp buf.
+ * It will just attach the page as a frag.
*/
-static void
-ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- struct sk_buff *skb, unsigned int size)
+static int
+ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ struct ice_rx_buf *rx_buf, const unsigned int size)
{
-#if (PAGE_SIZE >= 8192)
- unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
-#else
- unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
-#endif
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
if (!size)
- return;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
- rx_buf->page_offset, size, truesize);
+ return 0;
+
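+	/* First frag being attached: initialize the shared info lazily. */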
+ if (!xdp_buff_has_frags(xdp)) {
+ sinfo->nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ xdp_buff_set_frags_flag(xdp);
+ }
+
+ if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
+ return -ENOMEM;
+ }
+
+ __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
+ rx_buf->page_offset, size);
+ sinfo->xdp_frags_size += size;
- /* page is being used so we must update the page offset */
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ if (page_is_pfmemalloc(rx_buf->page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
+ return 0;
}
/**
@@ -886,19 +921,18 @@ ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
* ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
* @rx_ring: Rx descriptor ring to transact packets on
* @size: size of buffer to add to skb
- * @rx_buf_pgcnt: rx_buf page refcount
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
- int *rx_buf_pgcnt)
+ const unsigned int ntc)
{
struct ice_rx_buf *rx_buf;
- rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
- *rx_buf_pgcnt =
+ rx_buf = &rx_ring->rx_buf[ntc];
+ rx_buf->pgcnt =
#if (PAGE_SIZE < 8192)
page_count(rx_buf->page);
#else
@@ -922,26 +956,25 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
/**
* ice_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buf: Rx buffer to pull data from
* @xdp: xdp_buff pointing to the data
*
- * This function builds an skb around an existing Rx buffer, taking care
- * to set up the skb correctly and avoid any memcpy overhead.
+ * This function builds an skb around an existing XDP buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead. The driver
+ * has already combined the frags (if any) into skb_shared_info.
*/
static struct sk_buff *
-ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- struct xdp_buff *xdp)
+ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
u8 metasize = xdp->data - xdp->data_meta;
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(xdp->data_end -
- xdp->data_hard_start);
-#endif
+ struct skb_shared_info *sinfo = NULL;
+ unsigned int nr_frags;
struct sk_buff *skb;
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
+
/* Prefetch first cache line of first page. If xdp->data_meta
* is unused, this points exactly as xdp->data, otherwise we
* likely have a consumer accessing first few bytes of meta
@@ -949,7 +982,7 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
*/
net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = napi_build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
if (unlikely(!skb))
return NULL;
@@ -964,8 +997,11 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
if (metasize)
skb_metadata_set(skb, metasize);
- /* buffer is used by skb, update page_offset */
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ xdp_update_skb_shared_info(skb, nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_is_frag_pfmemalloc(xdp));
return skb;
}
@@ -981,24 +1017,30 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
* skb correctly.
*/
static struct sk_buff *
-ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- struct xdp_buff *xdp)
+ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
- unsigned int metasize = xdp->data - xdp->data_meta;
unsigned int size = xdp->data_end - xdp->data;
+ struct skb_shared_info *sinfo = NULL;
+ struct ice_rx_buf *rx_buf;
+ unsigned int nr_frags = 0;
unsigned int headlen;
struct sk_buff *skb;
/* prefetch first cache line of first page */
- net_prefetch(xdp->data_meta);
+ net_prefetch(xdp->data);
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- ICE_RX_HDR_SIZE + metasize,
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
+ rx_buf = &rx_ring->rx_buf[rx_ring->first_desc];
skb_record_rx_queue(skb, rx_ring->q_index);
/* Determine available headroom for copy */
headlen = size;
@@ -1006,32 +1048,42 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
- ALIGN(headlen + metasize, sizeof(long)));
-
- if (metasize) {
- skb_metadata_set(skb, metasize);
- __skb_pull(skb, metasize);
- }
+ memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
+ sizeof(long)));
/* if we exhaust the linear part then add what is left as a frag */
size -= headlen;
if (size) {
-#if (PAGE_SIZE >= 8192)
- unsigned int truesize = SKB_DATA_ALIGN(size);
-#else
- unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
-#endif
+		/* besides adding a partial frag here, we are going to add
+		 * frags from the xdp_buff; make sure there is enough space
+		 * for them
+		 */
+ if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
skb_add_rx_frag(skb, 0, rx_buf->page,
- rx_buf->page_offset + headlen, size, truesize);
- /* buffer is used by skb, update page_offset */
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ rx_buf->page_offset + headlen, size,
+ xdp->frame_sz);
} else {
- /* buffer is unused, reset bias back to rx_buf; data was copied
- * onto skb's linear part so there's no need for adjusting
- * page offset and we can reuse this buffer as-is
+ /* buffer is unused, change the act that should be taken later
+ * on; data was copied onto skb's linear part so there's no
+ * need for adjusting page offset and we can reuse this buffer
+ * as-is
*/
- rx_buf->pagecnt_bias++;
+ rx_buf->act = ICE_SKB_CONSUMED;
+ }
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ struct skb_shared_info *skinfo = skb_shinfo(skb);
+
+ memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
+ sizeof(skb_frag_t) * nr_frags);
+
+ xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_is_frag_pfmemalloc(xdp));
}
return skb;
@@ -1041,26 +1093,17 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
* ice_put_rx_buf - Clean up used buffer and either recycle or free
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: Rx buffer to pull data from
- * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
*
- * This function will update next_to_clean and then clean up the contents
- * of the rx_buf. It will either recycle the buffer or unmap it and free
- * the associated resources.
+ * This function will clean up the contents of the rx_buf. It will either
+ * recycle the buffer or unmap it and free the associated resources.
*/
static void
-ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- int rx_buf_pgcnt)
+ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
- u16 ntc = rx_ring->next_to_clean + 1;
-
- /* fetch, update, and store next to clean */
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
-
if (!rx_buf)
return;
- if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
+ if (ice_can_reuse_rx_page(rx_buf)) {
/* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
} else {
@@ -1076,27 +1119,6 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
}
/**
- * ice_is_non_eop - process handling of non-EOP buffers
- * @rx_ring: Rx ring being processed
- * @rx_desc: Rx descriptor for current buffer
- *
- * If the buffer is an EOP buffer, this function exits returning false,
- * otherwise return true indicating that this is in fact a non-EOP buffer.
- */
-static bool
-ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
-{
- /* if we are the last buffer then there is nothing else to do */
-#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
- if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
- return false;
-
- rx_ring->ring_stats->rx_stats.non_eop_descs++;
-
- return true;
-}
-
-/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1110,39 +1132,42 @@ ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
*/
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
- u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
unsigned int offset = rx_ring->rx_offset;
+ struct xdp_buff *xdp = &rx_ring->xdp;
struct ice_tx_ring *xdp_ring = NULL;
- unsigned int xdp_res, xdp_xmit = 0;
- struct sk_buff *skb = rx_ring->skb;
struct bpf_prog *xdp_prog = NULL;
- struct xdp_buff xdp;
+ u32 ntc = rx_ring->next_to_clean;
+ u32 cnt = rx_ring->count;
+ u32 cached_ntc = ntc;
+ u32 xdp_xmit = 0;
+ u32 cached_ntu;
bool failure;
+ u32 first;
/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- frame_sz = ice_rx_frame_truesize(rx_ring, 0);
+ xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif
- xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
- if (xdp_prog)
+ if (xdp_prog) {
xdp_ring = rx_ring->xdp_ring;
+ cached_ntu = xdp_ring->next_to_use;
+ }
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
struct ice_rx_buf *rx_buf;
- unsigned char *hard_start;
+ struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
- int rx_buf_pgcnt;
u16 vlan_tag = 0;
u16 rx_ptype;
/* get the Rx desc from Rx ring based on 'next_to_clean' */
- rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ rx_desc = ICE_RX_DESC(rx_ring, ntc);
/* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr
@@ -1166,8 +1191,8 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
ctrl_vsi->vf)
ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
- ice_put_rx_buf(rx_ring, NULL, 0);
- cleaned_count++;
+ if (++ntc == cnt)
+ ntc = 0;
continue;
}
@@ -1175,65 +1200,56 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
ICE_RX_FLX_DESC_PKT_LEN_M;
/* retrieve a buffer from the ring */
- rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
+ rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
- if (!size) {
- xdp.data = NULL;
- xdp.data_end = NULL;
- xdp.data_hard_start = NULL;
- xdp.data_meta = NULL;
- goto construct_skb;
- }
+ if (!xdp->data) {
+ void *hard_start;
- hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
- offset;
- xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+ hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
+ offset;
+ xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
#if (PAGE_SIZE > 4096)
- /* At larger PAGE_SIZE, frame_sz depend on len size */
- xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
+			/* At larger PAGE_SIZE, frame_sz depends on the frame length */
+ xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif
+ xdp_buff_clear_frags_flag(xdp);
+ } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
+ break;
+ }
+ if (++ntc == cnt)
+ ntc = 0;
- if (!xdp_prog)
- goto construct_skb;
+		/* skip if it is a NOP desc */
+ if (ice_is_non_eop(rx_ring, rx_desc))
+ continue;
- xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
- if (!xdp_res)
+ ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf);
+ if (rx_buf->act == ICE_XDP_PASS)
goto construct_skb;
- if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
- xdp_xmit |= xdp_res;
- ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
- } else {
- rx_buf->pagecnt_bias++;
- }
- total_rx_bytes += size;
+ total_rx_bytes += xdp_get_buff_len(xdp);
total_rx_pkts++;
- cleaned_count++;
- ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
continue;
construct_skb:
- if (skb) {
- ice_add_rx_frag(rx_ring, rx_buf, skb, size);
- } else if (likely(xdp.data)) {
- if (ice_ring_uses_build_skb(rx_ring))
- skb = ice_build_skb(rx_ring, rx_buf, &xdp);
- else
- skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
- }
+ if (likely(ice_ring_uses_build_skb(rx_ring)))
+ skb = ice_build_skb(rx_ring, xdp);
+ else
+ skb = ice_construct_skb(rx_ring, xdp);
/* exit if we failed to retrieve a buffer */
if (!skb) {
- rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
- if (rx_buf)
- rx_buf->pagecnt_bias++;
+ rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+ rx_buf->act = ICE_XDP_CONSUMED;
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ ice_set_rx_bufs_act(xdp, rx_ring,
+ ICE_XDP_CONSUMED);
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
break;
}
-
- ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
- cleaned_count++;
-
- /* skip if it is NOP desc */
- if (ice_is_non_eop(rx_ring, rx_desc))
- continue;
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
@@ -1245,10 +1261,8 @@ construct_skb:
vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
/* pad the skb if needed, to make a valid ethernet frame */
- if (eth_skb_pad(skb)) {
- skb = NULL;
+ if (eth_skb_pad(skb))
continue;
- }
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -1262,18 +1276,34 @@ construct_skb:
ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
/* send completed skb up the stack */
ice_receive_skb(rx_ring, skb, vlan_tag);
- skb = NULL;
/* update budget accounting */
total_rx_pkts++;
}
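+	/*
+	 * Settle the buffers used in this cycle: XDP_TX/XDP_REDIRECT and
+	 * XDP_PASS pages stay in use, so only the page offset advances;
+	 * ICE_XDP_CONSUMED buffers get their pagecnt_bias back so the
+	 * page can be recycled.
+	 */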
+ first = rx_ring->first_desc;
+ while (cached_ntc != first) {
+ struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];
+
+ if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+ xdp_xmit |= buf->act;
+ } else if (buf->act & ICE_XDP_CONSUMED) {
+ buf->pagecnt_bias++;
+ } else if (buf->act == ICE_XDP_PASS) {
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+ }
+
+ ice_put_rx_buf(rx_ring, buf);
+ if (++cached_ntc >= cnt)
+ cached_ntc = 0;
+ }
+ rx_ring->next_to_clean = ntc;
/* return the now-unused descriptors to hardware */
- failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
+ failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
- if (xdp_prog)
- ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
- rx_ring->skb = skb;
+ if (xdp_xmit)
+ ice_finalize_xdp_rx(xdp_ring, xdp_xmit, cached_ntu);
if (rx_ring->ring_stats)
ice_update_rx_ring_stats(rx_ring, total_rx_pkts,
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 4fd0e5d0a313..efa3d378f19e 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -9,10 +9,12 @@
#define ICE_DFLT_IRQ_WORK 256
#define ICE_RXBUF_3072 3072
#define ICE_RXBUF_2048 2048
+#define ICE_RXBUF_1664 1664
#define ICE_RXBUF_1536 1536
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17
+#define ICE_MAX_FRAME_LEGACY_RX 8320
/* The size limit for a transmit buffer in a descriptor is (16K - 1).
* In order to align with the read requests we will align the value to
@@ -110,6 +112,10 @@ static inline int ice_skb_pad(void)
(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
+#define ICE_RX_DESC_UNUSED(R) \
+ ((((R)->first_desc > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->first_desc - (R)->next_to_use - 1)
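+/*
+ * Worked example with hypothetical values: count = 512, next_to_use = 500,
+ * first_desc = 10 gives 512 + 10 - 500 - 1 = 21 refillable descriptors.
+ */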
+
#define ICE_RING_QUARTER(R) ((R)->count >> 2)
#define ICE_TX_FLAGS_TSO BIT(0)
@@ -134,6 +140,7 @@ static inline int ice_skb_pad(void)
#define ICE_XDP_TX BIT(1)
#define ICE_XDP_REDIR BIT(2)
#define ICE_XDP_EXIT BIT(3)
+#define ICE_SKB_CONSUMED ICE_XDP_CONSUMED
#define ICE_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
@@ -143,13 +150,20 @@ static inline int ice_skb_pad(void)
#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)
struct ice_tx_buf {
- struct ice_tx_desc *next_to_watch;
+ union {
+ struct ice_tx_desc *next_to_watch;
+ u32 rs_idx;
+ };
union {
struct sk_buff *skb;
void *raw_buf; /* used for XDP */
+ struct xdp_buff *xdp; /* used for XDP_TX ZC */
};
unsigned int bytecount;
- unsigned short gso_segs;
+ union {
+ unsigned int gso_segs;
+ unsigned int nr_frags; /* used for mbuf XDP */
+ };
u32 tx_flags;
DEFINE_DMA_UNMAP_LEN(len);
DEFINE_DMA_UNMAP_ADDR(dma);
@@ -170,7 +184,9 @@ struct ice_rx_buf {
dma_addr_t dma;
struct page *page;
unsigned int page_offset;
- u16 pagecnt_bias;
+ unsigned int pgcnt;
+ unsigned int act;
+ unsigned int pagecnt_bias;
};
struct ice_q_stats {
@@ -273,42 +289,44 @@ struct ice_rx_ring {
struct ice_vsi *vsi; /* Backreference to associated VSI */
struct ice_q_vector *q_vector; /* Backreference to associated vector */
u8 __iomem *tail;
+ u16 q_index; /* Queue number of ring */
+
+ u16 count; /* Number of descriptors */
+ u16 reg_idx; /* HW register index of the ring */
+ u16 next_to_alloc;
+ /* CL2 - 2nd cacheline starts here */
union {
struct ice_rx_buf *rx_buf;
struct xdp_buff **xdp_buf;
};
- /* CL2 - 2nd cacheline starts here */
- struct xdp_rxq_info xdp_rxq;
+ struct xdp_buff xdp;
/* CL3 - 3rd cacheline starts here */
- u16 q_index; /* Queue number of ring */
-
- u16 count; /* Number of descriptors */
- u16 reg_idx; /* HW register index of the ring */
+ struct bpf_prog *xdp_prog;
+ u16 rx_offset;
/* used in interrupt processing */
u16 next_to_use;
u16 next_to_clean;
- u16 next_to_alloc;
- u16 rx_offset;
- u16 rx_buf_len;
+ u16 first_desc;
/* stats structs */
struct ice_ring_stats *ring_stats;
struct rcu_head rcu; /* to avoid race on free */
- /* CL4 - 3rd cacheline starts here */
+ /* CL4 - 4th cacheline starts here */
struct ice_channel *ch;
- struct bpf_prog *xdp_prog;
struct ice_tx_ring *xdp_ring;
struct xsk_buff_pool *xsk_pool;
- struct sk_buff *skb;
dma_addr_t dma; /* physical address of ring */
u64 cached_phctime;
+ u16 rx_buf_len;
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_rx;
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
u8 flags;
+ /* CL5 - 5th cacheline starts here */
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;
struct ice_tx_ring {
@@ -326,12 +344,11 @@ struct ice_tx_ring {
struct xsk_buff_pool *xsk_pool;
u16 next_to_use;
u16 next_to_clean;
- u16 next_rs;
- u16 next_dd;
u16 q_handle; /* Queue handle per TC */
u16 reg_idx; /* HW register index of the ring */
u16 count; /* Number of descriptors */
u16 q_index; /* Queue number of ring */
+ u16 xdp_tx_active;
/* stats structs */
struct ice_ring_stats *ring_stats;
/* CL3 - 3rd cacheline starts here */
@@ -342,7 +359,6 @@ struct ice_tx_ring {
spinlock_t tx_lock;
u32 txq_teid; /* Added Tx queue TEID */
/* CL4 - 4th cacheline starts here */
- u16 xdp_tx_active;
#define ICE_TX_FLAGS_RING_XDP BIT(0)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
@@ -431,7 +447,7 @@ static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
union ice_32b_rx_flex_desc;
-bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, u16 cleaned_count);
+bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
ice_select_queue(struct net_device *dev, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 25f04266c668..9bbed3f14e42 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -221,128 +221,193 @@ ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
}
/**
+ * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
+ * @xdp_ring: XDP Tx ring
+ * @tx_buf: Tx buffer to clean
+ */
+static void
+ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
+{
+ dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ xdp_ring->xdp_tx_active--;
+ page_frag_free(tx_buf->raw_buf);
+ tx_buf->raw_buf = NULL;
+}
+
+/**
* ice_clean_xdp_irq - Reclaim resources after transmit completes on XDP ring
* @xdp_ring: XDP ring to clean
*/
-static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
+static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
{
- unsigned int total_bytes = 0, total_pkts = 0;
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
- u16 ntc = xdp_ring->next_to_clean;
- struct ice_tx_desc *next_dd_desc;
- u16 next_dd = xdp_ring->next_dd;
- struct ice_tx_buf *tx_buf;
- int i;
+ int total_bytes = 0, total_pkts = 0;
+ u32 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *tx_desc;
+ u32 cnt = xdp_ring->count;
+ u32 ready_frames = 0;
+ u32 frags;
+ u32 idx;
+ u32 ret;
+
+ idx = xdp_ring->tx_buf[ntc].rs_idx;
+ tx_desc = ICE_TX_DESC(xdp_ring, idx);
+ if (tx_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
+ if (idx >= ntc)
+ ready_frames = idx - ntc + 1;
+ else
+ ready_frames = idx + cnt - ntc + 1;
+ }
- next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
- if (!(next_dd_desc->cmd_type_offset_bsz &
- cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
- return;
+ if (!ready_frames)
+ return 0;
+ ret = ready_frames;
- for (i = 0; i < tx_thresh; i++) {
- tx_buf = &xdp_ring->tx_buf[ntc];
+ while (ready_frames) {
+ struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
+ /* bytecount holds size of head + frags */
total_bytes += tx_buf->bytecount;
- /* normally tx_buf->gso_segs was taken but at this point
- * it's always 1 for us
- */
+ frags = tx_buf->nr_frags;
total_pkts++;
+ /* count head + frags */
+ ready_frames -= frags + 1;
- page_frag_free(tx_buf->raw_buf);
- dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- tx_buf->raw_buf = NULL;
-
+ if (xdp_ring->xsk_pool)
+ xsk_buff_free(tx_buf->xdp);
+ else
+ ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
ntc++;
- if (ntc >= xdp_ring->count)
+ if (ntc == cnt)
ntc = 0;
+
+ for (int i = 0; i < frags; i++) {
+ tx_buf = &xdp_ring->tx_buf[ntc];
+
+ ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+ ntc++;
+ if (ntc == cnt)
+ ntc = 0;
+ }
}
- next_dd_desc->cmd_type_offset_bsz = 0;
- xdp_ring->next_dd = xdp_ring->next_dd + tx_thresh;
- if (xdp_ring->next_dd > xdp_ring->count)
- xdp_ring->next_dd = tx_thresh - 1;
+ tx_desc->cmd_type_offset_bsz = 0;
xdp_ring->next_to_clean = ntc;
ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);
+
+ return ret;
}
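/*
 * Editor's illustrative sketch, not part of the patch: the inclusive
 * completion window computed at the top of ice_clean_xdp_irq() above.
 * 'idx' is the descriptor that carried the RS bit, 'ntc' is
 * next_to_clean and 'cnt' is the ring size.
 */
static unsigned int toy_ready_frames(unsigned int idx, unsigned int ntc,
				     unsigned int cnt)
{
	return idx >= ntc ? idx - ntc + 1 : idx + cnt - ntc + 1;
}
/* e.g. cnt = 512, ntc = 510, idx = 3 wraps: 3 + 512 - 510 + 1 = 6 done */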
/**
- * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
- * @data: packet data pointer
- * @size: packet data size
+ * __ice_xmit_xdp_ring - submit frame to XDP ring for transmission
+ * @xdp: XDP buffer to be placed onto Tx descriptors
* @xdp_ring: XDP ring for transmission
*/
-int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
+int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
{
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
- u16 i = xdp_ring->next_to_use;
+ struct skb_shared_info *sinfo = NULL;
+ u32 size = xdp->data_end - xdp->data;
+ struct device *dev = xdp_ring->dev;
+ u32 ntu = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_head;
struct ice_tx_buf *tx_buf;
- dma_addr_t dma;
+ u32 cnt = xdp_ring->count;
+ void *data = xdp->data;
+ u32 nr_frags = 0;
+ u32 free_space;
+ u32 frag = 0;
+
+ free_space = ICE_DESC_UNUSED(xdp_ring);
+
+ if (ICE_DESC_UNUSED(xdp_ring) < ICE_RING_QUARTER(xdp_ring))
+ free_space += ice_clean_xdp_irq(xdp_ring);
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ if (free_space < nr_frags + 1) {
+ xdp_ring->ring_stats->tx_stats.tx_busy++;
+ return ICE_XDP_CONSUMED;
+ }
+ }
- if (ICE_DESC_UNUSED(xdp_ring) < tx_thresh)
- ice_clean_xdp_irq(xdp_ring);
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_head = &xdp_ring->tx_buf[ntu];
+ tx_buf = tx_head;
- if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
- xdp_ring->ring_stats->tx_stats.tx_busy++;
- return ICE_XDP_CONSUMED;
- }
+ for (;;) {
+ dma_addr_t dma;
- dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(xdp_ring->dev, dma))
- return ICE_XDP_CONSUMED;
+ dma = dma_map_single(dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_unmap;
- tx_buf = &xdp_ring->tx_buf[i];
- tx_buf->bytecount = size;
- tx_buf->gso_segs = 1;
- tx_buf->raw_buf = data;
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, size);
+ dma_unmap_addr_set(tx_buf, dma, dma);
- /* record length, and DMA address */
- dma_unmap_len_set(tx_buf, len, size);
- dma_unmap_addr_set(tx_buf, dma, dma);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);
+ tx_buf->raw_buf = data;
- tx_desc = ICE_TX_DESC(xdp_ring, i);
- tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
- size, 0);
+ ntu++;
+ if (ntu == cnt)
+ ntu = 0;
- xdp_ring->xdp_tx_active++;
- i++;
- if (i == xdp_ring->count) {
- i = 0;
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs = tx_thresh - 1;
- }
- xdp_ring->next_to_use = i;
+ if (frag == nr_frags)
+ break;
- if (i > xdp_ring->next_rs) {
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs += tx_thresh;
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_buf = &xdp_ring->tx_buf[ntu];
+
+ data = skb_frag_address(&sinfo->frags[frag]);
+ size = skb_frag_size(&sinfo->frags[frag]);
+ frag++;
}
+ /* store info about bytecount and frag count in first desc */
+ tx_head->bytecount = xdp_get_buff_len(xdp);
+ tx_head->nr_frags = nr_frags;
+
+ /* update last descriptor from a frame with EOP */
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);
+
+ xdp_ring->xdp_tx_active++;
+ xdp_ring->next_to_use = ntu;
+
return ICE_XDP_TX;
+
+dma_unmap:
+ for (;;) {
+ tx_buf = &xdp_ring->tx_buf[ntu];
+ dma_unmap_page(dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ if (tx_buf == tx_head)
+ break;
+
+ if (!ntu)
+ ntu += cnt;
+ ntu--;
+ }
+ return ICE_XDP_CONSUMED;
}
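/*
 * Editor's illustrative sketch, not part of the patch: the descriptor
 * layout __ice_xmit_xdp_ring() above produces for a multi-buffer frame.
 * One head buffer plus nr_frags fragments are laid out in order and only
 * the last descriptor carries EOP; types are stand-ins.
 */
struct toy_tx_desc { unsigned int len; int eop; };

static unsigned int toy_fill_frame(struct toy_tx_desc *ring, unsigned int ntu,
				   unsigned int cnt, const unsigned int *sizes,
				   unsigned int nr_frags)
{
	for (unsigned int i = 0; i <= nr_frags; i++) {
		ring[ntu].len = sizes[i];		/* sizes[0] is the head */
		ring[ntu].eop = (i == nr_frags);	/* EOP only on last */
		if (++ntu == cnt)
			ntu = 0;
	}
	return ntu;	/* new next_to_use */
}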
/**
- * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it
- * @xdp: XDP buffer
- * @xdp_ring: XDP Tx ring
- *
- * Returns negative on failure, 0 on success.
+ * ice_xmit_xdp_ring - submit frame to XDP ring for transmission
+ * @xdpf: XDP frame that will be converted to XDP buff
+ * @xdp_ring: XDP ring for transmission
*/
-int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
+int ice_xmit_xdp_ring(struct xdp_frame *xdpf, struct ice_tx_ring *xdp_ring)
{
- struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
-
- if (unlikely(!xdpf))
- return ICE_XDP_CONSUMED;
+ struct xdp_buff xdp;
- return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
+ xdp_convert_frame_to_buff(xdpf, &xdp);
+ return __ice_xmit_xdp_ring(&xdp, xdp_ring);
}
/**
@@ -354,14 +419,21 @@ int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
* should be called when a batch of packets has been processed in the
* napi loop.
*/
-void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res)
+void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
+ u32 first_idx)
{
+ struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx];
+
if (xdp_res & ICE_XDP_REDIR)
xdp_do_flush_map();
if (xdp_res & ICE_XDP_TX) {
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_lock(&xdp_ring->tx_lock);
+ /* store the index of the descriptor with the RS bit set in the
+ * first ice_tx_buf of the given NAPI batch
+ */
+ tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
ice_xdp_ring_update_tail(xdp_ring);
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_unlock(&xdp_ring->tx_lock);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index c7d2954dc9ea..ea977f283c22 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -6,6 +6,36 @@
#include "ice.h"
/**
+ * ice_set_rx_bufs_act - propagate Rx buffer action to frags
+ * @xdp: XDP buffer representing frame (linear and frags part)
+ * @rx_ring: Rx ring struct
+ * @act: action to store onto Rx buffers related to XDP buffer parts
+ *
+ * Set the action to be taken before putting the Rx buffers, from the first
+ * frag up to the one before the last. The last one is handled by the caller
+ * of this function, as it is the EOP frag currently being processed. This
+ * function is supposed to be called only when the XDP buffer contains frags.
+ */
+static inline void
+ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
+ const unsigned int act)
+{
+ const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ u32 first = rx_ring->first_desc;
+ u32 nr_frags = sinfo->nr_frags;
+ u32 cnt = rx_ring->count;
+ struct ice_rx_buf *buf;
+
+ for (int i = 0; i < nr_frags; i++) {
+ buf = &rx_ring->rx_buf[first];
+ buf->act = act;
+
+ if (++first == cnt)
+ first = 0;
+ }
+}
+
+/**
* ice_test_staterr - tests bits in Rx descriptor status and error fields
* @status_err_n: Rx descriptor status_error0 or status_error1 bits
* @stat_err_bits: value to mask
@@ -21,6 +51,28 @@ ice_test_staterr(__le16 status_err_n, const u16 stat_err_bits)
return !!(status_err_n & cpu_to_le16(stat_err_bits));
}
+/**
+ * ice_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * If the buffer is an EOP buffer, this function exits returning false;
+ * otherwise it returns true, indicating that this is in fact a non-EOP buffer.
+ */
+static inline bool
+ice_is_non_eop(const struct ice_rx_ring *rx_ring,
+ const union ice_32b_rx_flex_desc *rx_desc)
+{
+ /* if we are the last buffer then there is nothing else to do */
+#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
+ if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
+ return false;
+
+ rx_ring->ring_stats->rx_stats.non_eop_descs++;
+
+ return true;
+}
+
static inline __le64
ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
@@ -70,9 +122,28 @@ static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring)
writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
}
-void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res);
+/**
+ * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ *
+ * Returns the index of the descriptor on which the RS bit was set.
+ */
+static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring)
+{
+ u32 rs_idx = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
+ struct ice_tx_desc *tx_desc;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, rs_idx);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+
+ return rs_idx;
+}
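/*
 * Editor's illustrative sketch, not part of the patch: "one behind
 * next_to_use" with wraparound, as used by ice_set_rs_bit() above.
 */
static unsigned int toy_last_produced(unsigned int ntu, unsigned int cnt)
{
	return ntu ? ntu - 1 : cnt - 1;
}
/* e.g. ntu = 0, cnt = 512: the RS bit lands on descriptor 511 */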
+
+void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx);
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
-int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring);
+int ice_xmit_xdp_ring(struct xdp_frame *xdpf, struct ice_tx_ring *xdp_ring);
+int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 7105de6fb344..a25a68c69f22 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -598,6 +598,107 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
}
/**
+ * ice_clean_xdp_irq_zc - AF_XDP ZC specific Tx cleaning routine
+ * @xdp_ring: XDP Tx ring
+ */
+static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
+{
+ u16 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *tx_desc;
+ u16 cnt = xdp_ring->count;
+ struct ice_tx_buf *tx_buf;
+ u16 xsk_frames = 0;
+ u16 last_rs;
+ int i;
+
+ last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
+ tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
+ if (tx_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
+ if (last_rs >= ntc)
+ xsk_frames = last_rs - ntc + 1;
+ else
+ xsk_frames = last_rs + cnt - ntc + 1;
+ }
+
+ if (!xsk_frames)
+ return;
+
+ if (likely(!xdp_ring->xdp_tx_active))
+ goto skip;
+
+ ntc = xdp_ring->next_to_clean;
+ for (i = 0; i < xsk_frames; i++) {
+ tx_buf = &xdp_ring->tx_buf[ntc];
+
+ if (tx_buf->xdp) {
+ xsk_buff_free(tx_buf->xdp);
+ xdp_ring->xdp_tx_active--;
+ } else {
+ xsk_frames++;
+ }
+
+ ntc++;
+ if (ntc == cnt)
+ ntc = 0;
+ }
+skip:
+ tx_desc->cmd_type_offset_bsz = 0;
+ xdp_ring->next_to_clean += xsk_frames;
+ if (xdp_ring->next_to_clean >= cnt)
+ xdp_ring->next_to_clean -= cnt;
+ if (xsk_frames)
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+}
+
+/**
+ * ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX
+ * @xdp: XDP buffer to xmit
+ * @xdp_ring: XDP ring to produce descriptor onto
+ *
+ * Note that this function works directly on xdp_buff; there is no need to
+ * convert it to an xdp_frame. The xdp_buff pointer is stored in ice_tx_buf so
+ * that the cleaning side will be able to xsk_buff_free() it.
+ *
+ * Returns ICE_XDP_TX for a successfully produced descriptor, ICE_XDP_CONSUMED
+ * if there was not enough space on the XDP ring.
+ */
+static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
+ struct ice_tx_ring *xdp_ring)
+{
+ u32 size = xdp->data_end - xdp->data;
+ u32 ntu = xdp_ring->next_to_use;
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+ dma_addr_t dma;
+
+ if (ICE_DESC_UNUSED(xdp_ring) < ICE_RING_QUARTER(xdp_ring)) {
+ ice_clean_xdp_irq_zc(xdp_ring);
+ if (!ICE_DESC_UNUSED(xdp_ring)) {
+ xdp_ring->ring_stats->tx_stats.tx_busy++;
+ return ICE_XDP_CONSUMED;
+ }
+ }
+
+ dma = xsk_buff_xdp_get_dma(xdp);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);
+
+ tx_buf = &xdp_ring->tx_buf[ntu];
+ tx_buf->xdp = xdp;
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+ 0, size, 0);
+ xdp_ring->xdp_tx_active++;
+
+ if (++ntu == xdp_ring->count)
+ ntu = 0;
+ xdp_ring->next_to_use = ntu;
+
+ return ICE_XDP_TX;
+}
+
+/**
* ice_run_xdp_zc - Executes an XDP program in zero-copy path
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
@@ -630,7 +731,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
case XDP_PASS:
break;
case XDP_TX:
- result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ result = ice_xmit_xdp_tx_zc(xdp, xdp_ring);
if (result == ICE_XDP_CONSUMED)
goto out_failure;
break;
@@ -760,7 +861,7 @@ construct_skb:
if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);
- ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
+ ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
@@ -776,75 +877,6 @@ construct_skb:
}
/**
- * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
- * @xdp_ring: XDP Tx ring
- * @tx_buf: Tx buffer to clean
- */
-static void
-ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
-{
- page_frag_free(tx_buf->raw_buf);
- xdp_ring->xdp_tx_active--;
- dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
-}
-
-/**
- * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
- * @xdp_ring: XDP Tx ring
- */
-static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
-{
- u16 ntc = xdp_ring->next_to_clean;
- struct ice_tx_desc *tx_desc;
- u16 cnt = xdp_ring->count;
- struct ice_tx_buf *tx_buf;
- u16 xsk_frames = 0;
- u16 last_rs;
- int i;
-
- last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
- tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
- if ((tx_desc->cmd_type_offset_bsz &
- cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
- if (last_rs >= ntc)
- xsk_frames = last_rs - ntc + 1;
- else
- xsk_frames = last_rs + cnt - ntc + 1;
- }
-
- if (!xsk_frames)
- return;
-
- if (likely(!xdp_ring->xdp_tx_active))
- goto skip;
-
- ntc = xdp_ring->next_to_clean;
- for (i = 0; i < xsk_frames; i++) {
- tx_buf = &xdp_ring->tx_buf[ntc];
-
- if (tx_buf->raw_buf) {
- ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
- tx_buf->raw_buf = NULL;
- } else {
- xsk_frames++;
- }
-
- ntc++;
- if (ntc >= xdp_ring->count)
- ntc = 0;
- }
-skip:
- tx_desc->cmd_type_offset_bsz = 0;
- xdp_ring->next_to_clean += xsk_frames;
- if (xdp_ring->next_to_clean >= cnt)
- xdp_ring->next_to_clean -= cnt;
- if (xsk_frames)
- xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
-}
-
-/**
* ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
* @xdp_ring: XDP ring to produce the HW Tx descriptor on
* @desc: AF_XDP descriptor to pull the DMA address and length from
@@ -918,20 +950,6 @@ static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *d
}
/**
- * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
- * @xdp_ring: XDP ring to produce the HW Tx descriptors on
- */
-static void ice_set_rs_bit(struct ice_tx_ring *xdp_ring)
-{
- u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
- struct ice_tx_desc *tx_desc;
-
- tx_desc = ICE_TX_DESC(xdp_ring, ntu);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
-}
-
-/**
* ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
* @xdp_ring: XDP ring to produce the HW Tx descriptors on
*
@@ -1065,8 +1083,8 @@ void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
while (ntc != ntu) {
struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
- if (tx_buf->raw_buf)
- ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+ if (tx_buf->xdp)
+ xsk_buff_free(tx_buf->xdp);
else
xsk_frames++;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 45fbd8346de7..087c950fea0b 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2889,8 +2889,14 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
bpf_prog_put(old_prog);
/* bpf is just replaced, RXQ and MTU are already setup */
- if (!need_reset)
+ if (!need_reset) {
return 0;
+ } else {
+ if (prog)
+ xdp_features_set_redirect_target(dev, true);
+ else
+ xdp_features_clear_redirect_target(dev);
+ }
if (running)
igb_open(dev);
@@ -3333,6 +3339,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 8cc55648764a..2928a6c73692 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -6575,6 +6575,9 @@ static int igc_probe(struct pci_dev *pdev,
netdev->mpls_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= netdev->vlan_features;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
+
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index aeeb34e64610..e27af72aada8 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -29,6 +29,11 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
if (old_prog)
bpf_prog_put(old_prog);
+ if (prog)
+ xdp_features_set_redirect_target(dev, true);
+ else
+ xdp_features_clear_redirect_target(dev);
+
if (if_running)
igc_open(dev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 992b7ae75233..a3aaf051f9f1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -10301,6 +10301,8 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
if (err)
return -EINVAL;
+ if (!prog)
+ xdp_features_clear_redirect_target(dev);
} else {
for (i = 0; i < adapter->num_rx_queues; i++) {
WRITE_ONCE(adapter->rx_ring[i]->xdp_prog,
@@ -10321,6 +10323,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
if (adapter->xdp_ring[i]->xsk_pool)
(void)ixgbe_xsk_wakeup(adapter->netdev, i,
XDP_WAKEUP_RX);
+ xdp_features_set_redirect_target(dev, true);
}
return 0;
@@ -11016,6 +11019,9 @@ skip_sriov:
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
+
/* MTU range: 68 - 9710 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index ea0a230c1153..a44e4bd56142 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4634,6 +4634,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_VLAN_CTAG_TX;
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
/* MTU range: 68 - 1504 or 9710 */
netdev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a48588c80317..1cb4f59c0050 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -5612,6 +5612,9 @@ static int mvneta_probe(struct platform_device *pdev)
NETIF_F_TSO | NETIF_F_RXCSUM;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
netif_set_tso_max_segs(dev, MVNETA_MAX_TSO_SEGS);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 4da45c5abba5..9b4ecbe4f36d 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -6866,6 +6866,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->vlan_features |= features;
netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS);
+
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
dev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 68 - 9704 */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index c1ea60bc2630..179433d0a54a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -2512,10 +2512,13 @@ static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
/* Network stack and XDP shared same rx queues.
* Use separate tx queues for XDP and network stack.
*/
- if (pf->xdp_prog)
+ if (pf->xdp_prog) {
pf->hw.xdp_queues = pf->hw.rx_queues;
- else
+ xdp_features_set_redirect_target(dev, false);
+ } else {
pf->hw.xdp_queues = 0;
+ xdp_features_clear_redirect_target(dev);
+ }
pf->hw.tot_tx_queues += pf->hw.xdp_queues;
@@ -2878,6 +2881,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2_netdev_ops;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(pf);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 7c5810497495..14be6ea51b88 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -4451,6 +4451,12 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
register_netdevice_notifier(&mac->device_notifier);
}
+ if (mtk_page_pool_enabled(eth))
+ eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+
return 0;
free_netdev:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index af4c4858f397..e11bc0ac880e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3416,6 +3416,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->rss_hash_fn = ETH_RSS_HASH_TOP;
}
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+
/* MTU range: 68 - hw-specific max */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = priv->max_mtu;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 7499d64f6ec6..ec81d935262f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4741,6 +4741,13 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
if (old_prog)
bpf_prog_put(old_prog);
+ if (reset) {
+ if (prog)
+ xdp_features_set_redirect_target(netdev, true);
+ else
+ xdp_features_clear_redirect_target(netdev);
+ }
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
goto unlock;
@@ -5136,6 +5143,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->features |= NETIF_F_HIGHDMA;
netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_RX_SG;
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 2f6a048dee90..6120f2b6684f 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -2160,6 +2160,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
ndev->hw_features |= NETIF_F_RXHASH;
ndev->features = ndev->hw_features;
ndev->vlan_features = 0;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
err = register_netdev(ndev);
if (err) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 18fc9971f1c8..e4825d885560 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2529,10 +2529,15 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
netdev->features &= ~NETIF_F_HW_VLAN_STAG_RX;
nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
+ if (nn->app && nn->app->type->id == NFP_APP_BPF_NIC)
+ netdev->xdp_features |= NETDEV_XDP_ACT_HW_OFFLOAD;
+
/* Finalise the netdev setup */
switch (nn->dp.ops->version) {
case NFP_NFD_VER_NFD3:
netdev->netdev_ops = &nfp_nfd3_netdev_ops;
+ netdev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
break;
case NFP_NFD_VER_NFDK:
netdev->netdev_ops = &nfp_nfdk_netdev_ops;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 953f304b8588..b6d999927e86 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -892,6 +892,9 @@ static void qede_init_ndev(struct qede_dev *edev)
ndev->hw_features = hw_features;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
/* MTU range: 46 - 9600 */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 3a86f1213a05..02c2adeb0a12 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1028,6 +1028,10 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
net_dev->features |= efx->fixed_features;
+ net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
rc = efx_register_netdev(efx);
if (!rc)
return 0;
diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c
index 60e5b7c8ccf9..ef52ec71d197 100644
--- a/drivers/net/ethernet/sfc/siena/efx.c
+++ b/drivers/net/ethernet/sfc/siena/efx.c
@@ -1007,6 +1007,10 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
net_dev->features |= efx->fixed_features;
+ net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
rc = efx_register_netdev(efx);
if (!rc)
return 0;
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 9b46579b5a10..2d7347b71c41 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -2104,6 +2104,9 @@ static int netsec_probe(struct platform_device *pdev)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
ndev->hw_features = ndev->features;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
priv->rx_cksum_offload_flag = true;
ret = netsec_register_mdio(priv, phy_addr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f44e4e4b4f16..868f59ec8439 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -7153,6 +7153,8 @@ int stmmac_dvr_probe(struct device *device,
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
ret = stmmac_tc_init(priv, priv);
if (!ret) {
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 13c9c2d6b79b..37f0b62ec5d6 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1458,6 +1458,8 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
priv_sl2->emac_port = 1;
cpsw->slaves[1].ndev = ndev;
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
@@ -1635,6 +1637,8 @@ static int cpsw_probe(struct platform_device *pdev)
cpsw->slaves[0].ndev = ndev;
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 83596ec0c7cb..35128dd45ffc 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1405,6 +1405,10 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
SET_NETDEV_DEV(ndev, dev);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f9b219e6cd58..a9b139bbdb2c 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2559,6 +2559,8 @@ static int netvsc_probe(struct hv_device *dev,
netdev_lockdep_set_classes(net);
+ net->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+
/* MTU range: 68 - 1500 or 65521 */
net->min_mtu = NETVSC_MTU_MIN;
if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 6db6a75ff9b9..35fa1ca98671 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -286,6 +286,7 @@ static void nsim_setup(struct net_device *dev)
NETIF_F_TSO;
dev->hw_features |= NETIF_F_HW_TC;
dev->max_mtu = ETH_MAX_MTU;
+ dev->xdp_features = NETDEV_XDP_ACT_HW_OFFLOAD;
}
static int nsim_init_netdevsim(struct netdevsim *ns)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 745131b2d6db..ad653b32b2f0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1401,6 +1401,11 @@ static void tun_net_initialize(struct net_device *dev)
eth_hw_addr_random(dev);
+ /* Currently tun does not support XDP, only tap does. */
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
break;
}
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index ba3e05832843..1bb54de7124d 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1686,6 +1686,10 @@ static void veth_setup(struct net_device *dev)
dev->hw_enc_features = VETH_FEATURES;
dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
netif_set_tso_max_size(dev, GSO_MAX_SIZE);
+
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
}
/*
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5256fdd55547..fb5e68ed3ec2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3283,7 +3283,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (i == 0 && !old_prog)
virtnet_clear_guest_offloads(vi);
}
+ if (!old_prog)
+ xdp_features_set_redirect_target(dev, true);
} else {
+ xdp_features_clear_redirect_target(dev);
vi->xdp_enabled = false;
}
@@ -3919,6 +3922,7 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->hw_features |= NETIF_F_GRO_HW;
dev->vlan_features = dev->features;
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
/* MTU range: 68 - 65535 */
dev->min_mtu = MIN_MTU;
@@ -3947,8 +3951,10 @@ static int virtnet_probe(struct virtio_device *vdev)
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
spin_lock_init(&vi->refill_lock);
- if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
vi->mergeable_rx_bufs = true;
+ dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
+ }
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
vi->rx_usecs = 0;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 12b074286df9..47d54d8ea59d 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1741,6 +1741,8 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
* negotiate with the backend regarding supported features.
*/
netdev->features |= netdev->hw_features;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
netdev->ethtool_ops = &xennet_ethtool_ops;
netdev->min_mtu = ETH_MIN_MTU;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f7f24defccb8..35c18a98c21a 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -899,8 +899,12 @@ enum bpf_cgroup_storage_type {
/* The argument is a structure. */
#define BTF_FMODEL_STRUCT_ARG BIT(0)
+/* The argument is signed. */
+#define BTF_FMODEL_SIGNED_ARG BIT(1)
+
struct btf_func_model {
u8 ret_size;
+ u8 ret_flags;
u8 nr_args;
u8 arg_size[MAX_BPF_FUNC_ARGS];
u8 arg_flags[MAX_BPF_FUNC_ARGS];
@@ -939,7 +943,13 @@ struct btf_func_model {
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
* bytes on x86.
*/
-#define BPF_MAX_TRAMP_LINKS 38
+enum {
+#if defined(__s390x__)
+ BPF_MAX_TRAMP_LINKS = 27,
+#else
+ BPF_MAX_TRAMP_LINKS = 38,
+#endif
+};
struct bpf_tramp_links {
struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
@@ -1836,7 +1846,7 @@ struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
void bpf_prog_free_id(struct bpf_prog *prog);
-void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+void bpf_map_free_id(struct bpf_map *map);
struct btf_field *btf_record_find(const struct btf_record *rec,
u32 offset, enum btf_field_type type);
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 5f628f323442..49e0fe6d8274 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -73,6 +73,14 @@
#define KF_RCU (1 << 7) /* kfunc only takes rcu pointer arguments */
/*
+ * Tag marking a kernel function as a kfunc. This is meant to minimize the
+ * amount of copy-paste that kfunc authors have to include for correctness so
+ * as to avoid issues such as the compiler inlining or eliding either a static
+ * kfunc, or a global kfunc in an LTO build.
+ */
+#define __bpf_kfunc __used noinline
+
+/*
* Return the name of the passed struct, if exists, or halt the build if for
* example the structure gets renamed. In this way, developers have to revisit
* the code using that structure name, and update it accordingly.
@@ -236,6 +244,16 @@ static inline bool btf_type_is_small_int(const struct btf_type *t)
return btf_type_is_int(t) && t->size <= sizeof(u64);
}
+static inline u8 btf_int_encoding(const struct btf_type *t)
+{
+ return BTF_INT_ENCODING(*(u32 *)(t + 1));
+}
+
+static inline bool btf_type_is_signed_int(const struct btf_type *t)
+{
+ return btf_type_is_int(t) && (btf_int_encoding(t) & BTF_INT_SIGNED);
+}
+
static inline bool btf_type_is_enum(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM;
@@ -306,11 +324,6 @@ static inline u8 btf_int_offset(const struct btf_type *t)
return BTF_INT_OFFSET(*(u32 *)(t + 1));
}
-static inline u8 btf_int_encoding(const struct btf_type *t)
-{
- return BTF_INT_ENCODING(*(u32 *)(t + 1));
-}
-
static inline bool btf_type_is_scalar(const struct btf_type *t)
{
return btf_type_is_int(t) || btf_type_is_enum(t);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 38ab96ae0d68..d9cdbc047b49 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -47,6 +47,7 @@
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
+#include <uapi/linux/netdev.h>
#include <linux/hashtable.h>
#include <linux/rbtree.h>
#include <net/net_trackers.h>
@@ -1819,6 +1820,7 @@ enum netdev_ml_priv_type {
* of Layer 2 headers.
*
* @flags: Interface flags (a la BSD)
+ * @xdp_features: XDP capabilities supported by the device
* @priv_flags: Like 'flags' but invisible to userspace,
* see if.h for the definitions
* @gflags: Global flags ( kept as legacy )
@@ -2060,6 +2062,7 @@ struct net_device {
/* Read-mostly cache-line for fast-path access */
unsigned int flags;
+ xdp_features_t xdp_features;
unsigned long long priv_flags;
const struct net_device_ops *netdev_ops;
const struct xdp_metadata_ops *xdp_metadata_ops;
@@ -2846,6 +2849,7 @@ enum netdev_cmd {
NETDEV_OFFLOAD_XSTATS_DISABLE,
NETDEV_OFFLOAD_XSTATS_REPORT_USED,
NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
+ NETDEV_XDP_FEAT_CHANGE,
};
const char *netdev_cmd_to_name(enum netdev_cmd cmd);
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 91292aa13bc0..d517bfac937b 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -7,6 +7,7 @@
#define __LINUX_NET_XDP_H__
#include <linux/skbuff.h> /* skb_shared_info */
+#include <uapi/linux/netdev.h>
/**
* DOC: XDP RX-queue information
@@ -43,6 +44,8 @@ enum xdp_mem_type {
MEM_TYPE_MAX,
};
+typedef u32 xdp_features_t;
+
/* XDP flags for ndo_xdp_xmit */
#define XDP_XMIT_FLUSH (1U << 0) /* doorbell signal consumer */
#define XDP_XMIT_FLAGS_MASK XDP_XMIT_FLUSH
@@ -425,9 +428,21 @@ MAX_XDP_METADATA_KFUNC,
#ifdef CONFIG_NET
u32 bpf_xdp_metadata_kfunc_id(int id);
bool bpf_dev_bound_kfunc_id(u32 btf_id);
+void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg);
+void xdp_features_clear_redirect_target(struct net_device *dev);
#else
static inline u32 bpf_xdp_metadata_kfunc_id(int id) { return 0; }
static inline bool bpf_dev_bound_kfunc_id(u32 btf_id) { return false; }
+
+static inline void
+xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
+{
+}
+
+static inline void
+xdp_features_clear_redirect_target(struct net_device *dev)
+{
+}
#endif
#endif /* __LINUX_NET_XDP_H__ */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index ba0f0cfb5e42..17afd2b35ee5 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -2801,7 +2801,7 @@ union bpf_attr {
*
* long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
* Description
- * For en eBPF program attached to a perf event, retrieve the
+ * For an eBPF program attached to a perf event, retrieve the
* value of the event counter associated to *ctx* and store it in
* the structure pointed by *buf* and of size *buf_size*. Enabled
* and running times are also stored in the structure (see
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
new file mode 100644
index 000000000000..9ee459872600
--- /dev/null
+++ b/include/uapi/linux/netdev.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/netdev.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_NETDEV_H
+#define _UAPI_LINUX_NETDEV_H
+
+#define NETDEV_FAMILY_NAME "netdev"
+#define NETDEV_FAMILY_VERSION 1
+
+/**
+ * enum netdev_xdp_act
+ * @NETDEV_XDP_ACT_BASIC: XDP features set supported by all drivers
+ * (XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX)
+ * @NETDEV_XDP_ACT_REDIRECT: The netdev supports XDP_REDIRECT
+ * @NETDEV_XDP_ACT_NDO_XMIT: This feature informs if netdev implements
+ * ndo_xdp_xmit callback.
+ * @NETDEV_XDP_ACT_XSK_ZEROCOPY: This feature informs if netdev supports AF_XDP
+ * in zero copy mode.
+ * @NETDEV_XDP_ACT_HW_OFFLOAD: This feature informs if netdev supports XDP hw
+ * offloading.
+ * @NETDEV_XDP_ACT_RX_SG: This feature informs if netdev implements non-linear
+ * XDP buffer support in the driver napi callback.
+ * @NETDEV_XDP_ACT_NDO_XMIT_SG: This feature informs if netdev implements
+ * non-linear XDP buffer support in ndo_xdp_xmit callback.
+ */
+enum netdev_xdp_act {
+ NETDEV_XDP_ACT_BASIC = 1,
+ NETDEV_XDP_ACT_REDIRECT = 2,
+ NETDEV_XDP_ACT_NDO_XMIT = 4,
+ NETDEV_XDP_ACT_XSK_ZEROCOPY = 8,
+ NETDEV_XDP_ACT_HW_OFFLOAD = 16,
+ NETDEV_XDP_ACT_RX_SG = 32,
+ NETDEV_XDP_ACT_NDO_XMIT_SG = 64,
+};
+
+enum {
+ NETDEV_A_DEV_IFINDEX = 1,
+ NETDEV_A_DEV_PAD,
+ NETDEV_A_DEV_XDP_FEATURES,
+
+ __NETDEV_A_DEV_MAX,
+ NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1)
+};
+
+enum {
+ NETDEV_CMD_DEV_GET = 1,
+ NETDEV_CMD_DEV_ADD_NTF,
+ NETDEV_CMD_DEV_DEL_NTF,
+ NETDEV_CMD_DEV_CHANGE_NTF,
+
+ __NETDEV_CMD_MAX,
+ NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
+};
+
+#define NETDEV_MCGRP_MGMT "mgmt"
+
+#endif /* _UAPI_LINUX_NETDEV_H */
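/*
 * Editor's illustrative sketch, not part of the patch: consuming the new
 * capability bits from userspace. A real consumer would read the feature
 * word via the "netdev" genetlink family (NETDEV_A_DEV_XDP_FEATURES);
 * here it is a hardcoded stand-in.
 */
#include <stdio.h>
#include <linux/netdev.h>	/* the uapi header added above */

int main(void)
{
	unsigned int feats = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			     NETDEV_XDP_ACT_NDO_XMIT;

	if (feats & NETDEV_XDP_ACT_NDO_XMIT)
		printf("device is a valid XDP_REDIRECT target\n");
	if (!(feats & NETDEV_XDP_ACT_XSK_ZEROCOPY))
		printf("AF_XDP sockets would run in copy mode\n");
	return 0;
}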
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 5b3cb8068b4d..740bdb045b14 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -6453,6 +6453,18 @@ static int __get_type_size(struct btf *btf, u32 btf_id,
return -EINVAL;
}
+static u8 __get_type_fmodel_flags(const struct btf_type *t)
+{
+ u8 flags = 0;
+
+ if (__btf_type_is_struct(t))
+ flags |= BTF_FMODEL_STRUCT_ARG;
+ if (btf_type_is_signed_int(t))
+ flags |= BTF_FMODEL_SIGNED_ARG;
+
+ return flags;
+}
+
int btf_distill_func_proto(struct bpf_verifier_log *log,
struct btf *btf,
const struct btf_type *func,
@@ -6473,6 +6485,7 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
m->arg_flags[i] = 0;
}
m->ret_size = 8;
+ m->ret_flags = 0;
m->nr_args = MAX_BPF_FUNC_REG_ARGS;
return 0;
}
@@ -6492,6 +6505,7 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
return -EINVAL;
}
m->ret_size = ret;
+ m->ret_flags = __get_type_fmodel_flags(t);
for (i = 0; i < nargs; i++) {
if (i == nargs - 1 && args[i].type == 0) {
@@ -6516,7 +6530,7 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
return -EINVAL;
}
m->arg_size[i] = ret;
- m->arg_flags[i] = __btf_type_is_struct(t) ? BTF_FMODEL_STRUCT_ARG : 0;
+ m->arg_flags[i] = __get_type_fmodel_flags(t);
}
m->nr_args = nargs;
return 0;
diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c
index 25355a0a367a..52b981512a35 100644
--- a/kernel/bpf/cpumask.c
+++ b/kernel/bpf/cpumask.c
@@ -48,10 +48,13 @@ __diag_ignore_all("-Wmissing-prototypes",
* bpf_cpumask_create() allocates memory using the BPF memory allocator, and
* will not block. It may return NULL if no memory is available.
*/
-struct bpf_cpumask *bpf_cpumask_create(void)
+__bpf_kfunc struct bpf_cpumask *bpf_cpumask_create(void)
{
struct bpf_cpumask *cpumask;
+ /* cpumask must be the first element so that struct bpf_cpumask can be cast to struct cpumask. */
+ BUILD_BUG_ON(offsetof(struct bpf_cpumask, cpumask) != 0);
+
cpumask = bpf_mem_alloc(&bpf_cpumask_ma, sizeof(*cpumask));
if (!cpumask)
return NULL;
@@ -71,7 +74,7 @@ struct bpf_cpumask *bpf_cpumask_create(void)
* must either be embedded in a map as a kptr, or freed with
* bpf_cpumask_release().
*/
-struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
+__bpf_kfunc struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
{
refcount_inc(&cpumask->usage);
return cpumask;
@@ -87,7 +90,7 @@ struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
* kptr, or freed with bpf_cpumask_release(). This function may return NULL if
* no BPF cpumask was found in the specified map value.
*/
-struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
+__bpf_kfunc struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
{
struct bpf_cpumask *cpumask;
@@ -113,7 +116,7 @@ struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
* reference of the BPF cpumask has been released, it is subsequently freed in
* an RCU callback in the BPF memory allocator.
*/
-void bpf_cpumask_release(struct bpf_cpumask *cpumask)
+__bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask)
{
if (!cpumask)
return;
@@ -132,7 +135,7 @@ void bpf_cpumask_release(struct bpf_cpumask *cpumask)
* Find the index of the first nonzero bit of the cpumask. A struct bpf_cpumask
* pointer may be safely passed to this function.
*/
-u32 bpf_cpumask_first(const struct cpumask *cpumask)
+__bpf_kfunc u32 bpf_cpumask_first(const struct cpumask *cpumask)
{
return cpumask_first(cpumask);
}
@@ -145,7 +148,7 @@ u32 bpf_cpumask_first(const struct cpumask *cpumask)
* Find the index of the first unset bit of the cpumask. A struct bpf_cpumask
* pointer may be safely passed to this function.
*/
-u32 bpf_cpumask_first_zero(const struct cpumask *cpumask)
+__bpf_kfunc u32 bpf_cpumask_first_zero(const struct cpumask *cpumask)
{
return cpumask_first_zero(cpumask);
}
@@ -155,7 +158,7 @@ u32 bpf_cpumask_first_zero(const struct cpumask *cpumask)
* @cpu: The CPU to be set in the cpumask.
* @cpumask: The BPF cpumask in which a bit is being set.
*/
-void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
+__bpf_kfunc void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
if (!cpu_valid(cpu))
return;
@@ -168,7 +171,7 @@ void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
* @cpu: The CPU to be cleared from the cpumask.
* @cpumask: The BPF cpumask in which a bit is being cleared.
*/
-void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
+__bpf_kfunc void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
if (!cpu_valid(cpu))
return;
@@ -185,7 +188,7 @@ void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
* * true - @cpu is set in the cpumask
* * false - @cpu was not set in the cpumask, or @cpu is an invalid cpu.
*/
-bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
+__bpf_kfunc bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
{
if (!cpu_valid(cpu))
return false;
@@ -202,7 +205,7 @@ bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
* * true - @cpu is set in the cpumask
* * false - @cpu was not set in the cpumask, or @cpu is invalid.
*/
-bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
+__bpf_kfunc bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
if (!cpu_valid(cpu))
return false;
@@ -220,7 +223,7 @@ bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
* * true - @cpu is set in the cpumask
* * false - @cpu was not set in the cpumask, or @cpu is invalid.
*/
-bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
+__bpf_kfunc bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
if (!cpu_valid(cpu))
return false;
@@ -232,7 +235,7 @@ bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
* bpf_cpumask_setall() - Set all of the bits in a BPF cpumask.
* @cpumask: The BPF cpumask having all of its bits set.
*/
-void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
+__bpf_kfunc void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
{
cpumask_setall((struct cpumask *)cpumask);
}
@@ -241,7 +244,7 @@ void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
* bpf_cpumask_clear() - Clear all of the bits in a BPF cpumask.
* @cpumask: The BPF cpumask being cleared.
*/
-void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
+__bpf_kfunc void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
{
cpumask_clear((struct cpumask *)cpumask);
}
@@ -258,9 +261,9 @@ void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
*
* struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
*/
-bool bpf_cpumask_and(struct bpf_cpumask *dst,
- const struct cpumask *src1,
- const struct cpumask *src2)
+__bpf_kfunc bool bpf_cpumask_and(struct bpf_cpumask *dst,
+ const struct cpumask *src1,
+ const struct cpumask *src2)
{
return cpumask_and((struct cpumask *)dst, src1, src2);
}
@@ -273,9 +276,9 @@ bool bpf_cpumask_and(struct bpf_cpumask *dst,
*
* struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
*/
-void bpf_cpumask_or(struct bpf_cpumask *dst,
- const struct cpumask *src1,
- const struct cpumask *src2)
+__bpf_kfunc void bpf_cpumask_or(struct bpf_cpumask *dst,
+ const struct cpumask *src1,
+ const struct cpumask *src2)
{
cpumask_or((struct cpumask *)dst, src1, src2);
}
@@ -288,9 +291,9 @@ void bpf_cpumask_or(struct bpf_cpumask *dst,
*
* struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
*/
-void bpf_cpumask_xor(struct bpf_cpumask *dst,
- const struct cpumask *src1,
- const struct cpumask *src2)
+__bpf_kfunc void bpf_cpumask_xor(struct bpf_cpumask *dst,
+ const struct cpumask *src1,
+ const struct cpumask *src2)
{
cpumask_xor((struct cpumask *)dst, src1, src2);
}
@@ -306,7 +309,7 @@ void bpf_cpumask_xor(struct bpf_cpumask *dst,
*
* struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
*/
-bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
+__bpf_kfunc bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
{
return cpumask_equal(src1, src2);
}
@@ -322,7 +325,7 @@ bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
*
* struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
*/
-bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2)
+__bpf_kfunc bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2)
{
return cpumask_intersects(src1, src2);
}
@@ -338,7 +341,7 @@ bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *sr
*
* struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
*/
-bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
+__bpf_kfunc bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
{
return cpumask_subset(src1, src2);
}
@@ -353,7 +356,7 @@ bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
*
* A struct bpf_cpumask pointer may be safely passed to @cpumask.
*/
-bool bpf_cpumask_empty(const struct cpumask *cpumask)
+__bpf_kfunc bool bpf_cpumask_empty(const struct cpumask *cpumask)
{
return cpumask_empty(cpumask);
}
@@ -368,7 +371,7 @@ bool bpf_cpumask_empty(const struct cpumask *cpumask)
*
* A struct bpf_cpumask pointer may be safely passed to @cpumask.
*/
-bool bpf_cpumask_full(const struct cpumask *cpumask)
+__bpf_kfunc bool bpf_cpumask_full(const struct cpumask *cpumask)
{
return cpumask_full(cpumask);
}
@@ -380,7 +383,7 @@ bool bpf_cpumask_full(const struct cpumask *cpumask)
*
* A struct bpf_cpumask pointer may be safely passed to @src.
*/
-void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
+__bpf_kfunc void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
{
cpumask_copy((struct cpumask *)dst, src);
}
@@ -395,7 +398,7 @@ void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
*
* A struct bpf_cpumask pointer may be safely passed to @src.
*/
-u32 bpf_cpumask_any(const struct cpumask *cpumask)
+__bpf_kfunc u32 bpf_cpumask_any(const struct cpumask *cpumask)
{
return cpumask_any(cpumask);
}
@@ -412,7 +415,7 @@ u32 bpf_cpumask_any(const struct cpumask *cpumask)
*
* struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
*/
-u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2)
+__bpf_kfunc u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2)
{
return cpumask_any_and(src1, src2);
}
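
The cpumask hunks above are annotation-only: each kfunc definition gains the __bpf_kfunc tag (defined elsewhere in this series, in include/linux/btf.h, as __used noinline) so the functions stay out of line and visible in vmlinux BTF. For orientation, a minimal sketch of the BPF-program side — a hypothetical caller; the declarations mirror the signatures above, and bpf_cpumask_create()/bpf_cpumask_release() come from earlier in the same kernel/bpf/cpumask.c:

/* Hypothetical BPF program consuming the cpumask kfuncs above.
 * Declarations use the standard extern-__ksym pattern; create/release
 * are defined earlier in kernel/bpf/cpumask.c.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(all_cpus, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask = bpf_cpumask_create();

	if (!mask)
		return 0;
	bpf_cpumask_setall(mask);
	if (!bpf_cpumask_full((const struct cpumask *)mask))
		bpf_printk("setall left the mask partially clear?");
	bpf_cpumask_release(mask);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

The (const struct cpumask *) casts work because struct bpf_cpumask embeds the cpumask as its first member, which is what the "may be safely passed" docstrings above refer to.
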
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index d01e4c55b376..2675fefc6cb6 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -474,7 +474,11 @@ static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
{
int err;
- if (!dev->netdev_ops->ndo_xdp_xmit)
+ if (!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
+ return -EOPNOTSUPP;
+
+ if (unlikely(!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
+ xdp_frame_has_frags(xdpf)))
return -EOPNOTSUPP;
err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
@@ -532,8 +536,14 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
{
- if (!obj ||
- !obj->dev->netdev_ops->ndo_xdp_xmit)
+ if (!obj)
+ return false;
+
+ if (!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
+ return false;
+
+ if (unlikely(!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
+ xdp_frame_has_frags(xdpf)))
return false;
if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
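
Both devmap checks above move from probing ndo_xdp_xmit to testing dev->xdp_features, so a redirect is rejected up front when the target cannot handle it (in particular multi-frag frames without NDO_XMIT_SG). A hedged sketch of the driver side that feeds these checks — flag names come from the new <linux/netdev.h> uAPI; the combination shown is illustrative, not any specific driver:

/* Illustrative driver probe snippet. Advertising xdp_features lets the
 * devmap checks above reject a redirect early instead of relying on
 * the presence of ndo_xdp_xmit. Flags are from <linux/netdev.h>.
 */
#include <linux/netdevice.h>

static void example_cfg_netdev(struct net_device *netdev)
{
	netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
			       NETDEV_XDP_ACT_REDIRECT |
			       NETDEV_XDP_ACT_NDO_XMIT;

	/* Only if ndo_xdp_xmit can transmit multi-frag frames: */
	netdev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT_SG;
}
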
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 458db2db2f81..2dae44581922 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1776,7 +1776,7 @@ __diag_push();
__diag_ignore_all("-Wmissing-prototypes",
"Global functions as their definitions will be in vmlinux BTF");
-void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
+__bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
{
struct btf_struct_meta *meta = meta__ign;
u64 size = local_type_id__k;
@@ -1790,7 +1790,7 @@ void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
return p;
}
-void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
+__bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
{
struct btf_struct_meta *meta = meta__ign;
void *p = p__alloc;
@@ -1811,12 +1811,12 @@ static void __bpf_list_add(struct bpf_list_node *node, struct bpf_list_head *hea
tail ? list_add_tail(n, h) : list_add(n, h);
}
-void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node)
+__bpf_kfunc void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node)
{
return __bpf_list_add(node, head, false);
}
-void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node)
+__bpf_kfunc void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node)
{
return __bpf_list_add(node, head, true);
}
@@ -1834,12 +1834,12 @@ static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tai
return (struct bpf_list_node *)n;
}
-struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
+__bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
{
return __bpf_list_del(head, false);
}
-struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
+__bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
{
return __bpf_list_del(head, true);
}
@@ -1850,7 +1850,7 @@ struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
* bpf_task_release().
* @p: The task on which a reference is being acquired.
*/
-struct task_struct *bpf_task_acquire(struct task_struct *p)
+__bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
{
return get_task_struct(p);
}
@@ -1861,7 +1861,7 @@ struct task_struct *bpf_task_acquire(struct task_struct *p)
* released by calling bpf_task_release().
* @p: The task on which a reference is being acquired.
*/
-struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
+__bpf_kfunc struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
{
/* For the time being this function returns NULL, as it's not currently
* possible to safely acquire a reference to a task with RCU protection
@@ -1913,7 +1913,7 @@ struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
* be released by calling bpf_task_release().
* @pp: A pointer to a task kptr on which a reference is being acquired.
*/
-struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
+__bpf_kfunc struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
{
/* We must return NULL here until we have clarity on how to properly
* leverage RCU for ensuring a task's lifetime. See the comment above
@@ -1926,7 +1926,7 @@ struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
* bpf_task_release - Release the reference acquired on a task.
* @p: The task on which a reference is being released.
*/
-void bpf_task_release(struct task_struct *p)
+__bpf_kfunc void bpf_task_release(struct task_struct *p)
{
if (!p)
return;
@@ -1941,7 +1941,7 @@ void bpf_task_release(struct task_struct *p)
* calling bpf_cgroup_release().
* @cgrp: The cgroup on which a reference is being acquired.
*/
-struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
+__bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
{
cgroup_get(cgrp);
return cgrp;
@@ -1953,7 +1953,7 @@ struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
* be released by calling bpf_cgroup_release().
* @cgrpp: A pointer to a cgroup kptr on which a reference is being acquired.
*/
-struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
+__bpf_kfunc struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
{
struct cgroup *cgrp;
@@ -1985,7 +1985,7 @@ struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
* drops to 0.
* @cgrp: The cgroup on which a reference is being released.
*/
-void bpf_cgroup_release(struct cgroup *cgrp)
+__bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
{
if (!cgrp)
return;
@@ -2000,7 +2000,7 @@ void bpf_cgroup_release(struct cgroup *cgrp)
* @cgrp: The cgroup for which we're performing a lookup.
* @level: The level of ancestor to look up.
*/
-struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
+__bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
{
struct cgroup *ancestor;
@@ -2019,7 +2019,7 @@ struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
* stored in a map, or released with bpf_task_release().
* @pid: The pid of the task being looked up.
*/
-struct task_struct *bpf_task_from_pid(s32 pid)
+__bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
{
struct task_struct *p;
@@ -2032,22 +2032,22 @@ struct task_struct *bpf_task_from_pid(s32 pid)
return p;
}
-void *bpf_cast_to_kern_ctx(void *obj)
+__bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
{
return obj;
}
-void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
+__bpf_kfunc void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
{
return obj__ign;
}
-void bpf_rcu_read_lock(void)
+__bpf_kfunc void bpf_rcu_read_lock(void)
{
rcu_read_lock();
}
-void bpf_rcu_read_unlock(void)
+__bpf_kfunc void bpf_rcu_read_unlock(void)
{
rcu_read_unlock();
}
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 88aae38fde66..0c85e06f7ea7 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -136,7 +136,7 @@ static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
- bpf_map_free_id(&offmap->map, true);
+ bpf_map_free_id(&offmap->map);
list_del_init(&offmap->offloads);
offmap->netdev = NULL;
}
diff --git a/kernel/bpf/preload/bpf_preload_kern.c b/kernel/bpf/preload/bpf_preload_kern.c
index 5106b5372f0c..b56f9f3314fd 100644
--- a/kernel/bpf/preload/bpf_preload_kern.c
+++ b/kernel/bpf/preload/bpf_preload_kern.c
@@ -3,7 +3,11 @@
#include <linux/init.h>
#include <linux/module.h>
#include "bpf_preload.h"
-#include "iterators/iterators.lskel.h"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#include "iterators/iterators.lskel-little-endian.h"
+#else
+#include "iterators/iterators.lskel-big-endian.h"
+#endif
static struct bpf_link *maps_link, *progs_link;
static struct iterators_bpf *skel;
diff --git a/kernel/bpf/preload/iterators/Makefile b/kernel/bpf/preload/iterators/Makefile
index 6762b1260f2f..8937dc6bc8d0 100644
--- a/kernel/bpf/preload/iterators/Makefile
+++ b/kernel/bpf/preload/iterators/Makefile
@@ -35,20 +35,22 @@ endif
.PHONY: all clean
-all: iterators.lskel.h
+all: iterators.lskel-little-endian.h
+
+big: iterators.lskel-big-endian.h
clean:
$(call msg,CLEAN)
$(Q)rm -rf $(OUTPUT) iterators
-iterators.lskel.h: $(OUTPUT)/iterators.bpf.o | $(BPFTOOL)
+iterators.lskel-%.h: $(OUTPUT)/%/iterators.bpf.o | $(BPFTOOL)
$(call msg,GEN-SKEL,$@)
$(Q)$(BPFTOOL) gen skeleton -L $< > $@
-
-$(OUTPUT)/iterators.bpf.o: iterators.bpf.c $(BPFOBJ) | $(OUTPUT)
+$(OUTPUT)/%/iterators.bpf.o: iterators.bpf.c $(BPFOBJ) | $(OUTPUT)
$(call msg,BPF,$@)
- $(Q)$(CLANG) -g -O2 -target bpf $(INCLUDES) \
+ $(Q)mkdir -p $(@D)
+ $(Q)$(CLANG) -g -O2 -target bpf -m$* $(INCLUDES) \
-c $(filter %.c,$^) -o $@ && \
$(LLVM_STRIP) -g $@
diff --git a/kernel/bpf/preload/iterators/README b/kernel/bpf/preload/iterators/README
index 7fd6d39a9ad2..98e7c90ea012 100644
--- a/kernel/bpf/preload/iterators/README
+++ b/kernel/bpf/preload/iterators/README
@@ -1,4 +1,7 @@
WARNING:
-If you change "iterators.bpf.c" do "make -j" in this directory to rebuild "iterators.skel.h".
+If you change "iterators.bpf.c", do "make -j" in this directory to
+rebuild "iterators.lskel-little-endian.h". Then, on a big-endian
+machine, do "make -j big" in this directory to rebuild
+"iterators.lskel-big-endian.h". Commit both resulting headers.
Make sure to have clang 10 installed.
See Documentation/bpf/bpf_devel_QA.rst
diff --git a/kernel/bpf/preload/iterators/iterators.lskel-big-endian.h b/kernel/bpf/preload/iterators/iterators.lskel-big-endian.h
new file mode 100644
index 000000000000..ebdc6c0cdb70
--- /dev/null
+++ b/kernel/bpf/preload/iterators/iterators.lskel-big-endian.h
@@ -0,0 +1,419 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */
+#ifndef __ITERATORS_BPF_SKEL_H__
+#define __ITERATORS_BPF_SKEL_H__
+
+#include <bpf/skel_internal.h>
+
+struct iterators_bpf {
+ struct bpf_loader_ctx ctx;
+ struct {
+ struct bpf_map_desc rodata;
+ } maps;
+ struct {
+ struct bpf_prog_desc dump_bpf_map;
+ struct bpf_prog_desc dump_bpf_prog;
+ } progs;
+ struct {
+ int dump_bpf_map_fd;
+ int dump_bpf_prog_fd;
+ } links;
+};
+
+static inline int
+iterators_bpf__dump_bpf_map__attach(struct iterators_bpf *skel)
+{
+ int prog_fd = skel->progs.dump_bpf_map.prog_fd;
+ int fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);
+
+ if (fd > 0)
+ skel->links.dump_bpf_map_fd = fd;
+ return fd;
+}
+
+static inline int
+iterators_bpf__dump_bpf_prog__attach(struct iterators_bpf *skel)
+{
+ int prog_fd = skel->progs.dump_bpf_prog.prog_fd;
+ int fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);
+
+ if (fd > 0)
+ skel->links.dump_bpf_prog_fd = fd;
+ return fd;
+}
+
+static inline int
+iterators_bpf__attach(struct iterators_bpf *skel)
+{
+ int ret = 0;
+
+ ret = ret < 0 ? ret : iterators_bpf__dump_bpf_map__attach(skel);
+ ret = ret < 0 ? ret : iterators_bpf__dump_bpf_prog__attach(skel);
+ return ret < 0 ? ret : 0;
+}
+
+static inline void
+iterators_bpf__detach(struct iterators_bpf *skel)
+{
+ skel_closenz(skel->links.dump_bpf_map_fd);
+ skel_closenz(skel->links.dump_bpf_prog_fd);
+}
+static void
+iterators_bpf__destroy(struct iterators_bpf *skel)
+{
+ if (!skel)
+ return;
+ iterators_bpf__detach(skel);
+ skel_closenz(skel->progs.dump_bpf_map.prog_fd);
+ skel_closenz(skel->progs.dump_bpf_prog.prog_fd);
+ skel_closenz(skel->maps.rodata.map_fd);
+ skel_free(skel);
+}
+static inline struct iterators_bpf *
+iterators_bpf__open(void)
+{
+ struct iterators_bpf *skel;
+
+ skel = skel_alloc(sizeof(*skel));
+ if (!skel)
+ goto cleanup;
+ skel->ctx.sz = (void *)&skel->links - (void *)skel;
+ return skel;
+cleanup:
+ iterators_bpf__destroy(skel);
+ return NULL;
+}
+
+static inline int
+iterators_bpf__load(struct iterators_bpf *skel)
+{
+ struct bpf_load_and_run_opts opts = {};
+ int err;
+
+ opts.ctx = (struct bpf_loader_ctx *)skel;
+ opts.data_sz = 6008;
+ opts.data = (void *)"\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xeb\x9f\x01\0\
+\0\0\0\x18\0\0\0\0\0\0\x04\x1c\0\0\x04\x1c\0\0\x05\x18\0\0\0\0\x02\0\0\0\0\0\0\
+\x02\0\0\0\x01\x04\0\0\x02\0\0\0\x10\0\0\0\x13\0\0\0\x03\0\0\0\0\0\0\0\x18\0\0\
+\0\x04\0\0\0\x40\0\0\0\0\x02\0\0\0\0\0\0\x08\0\0\0\0\x02\0\0\0\0\0\0\x0d\0\0\0\
+\0\x0d\0\0\x01\0\0\0\x06\0\0\0\x1c\0\0\0\x01\0\0\0\x20\x01\0\0\0\0\0\0\x04\x01\
+\0\0\x20\0\0\0\x24\x0c\0\0\x01\0\0\0\x05\0\0\0\xc2\x04\0\0\x03\0\0\0\x18\0\0\0\
+\xd0\0\0\0\x09\0\0\0\0\0\0\0\xd4\0\0\0\x0b\0\0\0\x40\0\0\0\xdf\0\0\0\x0b\0\0\0\
+\x80\0\0\0\0\x02\0\0\0\0\0\0\x0a\0\0\0\xe7\x07\0\0\0\0\0\0\0\0\0\0\xf0\x08\0\0\
+\0\0\0\0\x0c\0\0\0\xf6\x01\0\0\0\0\0\0\x08\0\0\0\x40\0\0\x01\xb3\x04\0\0\x03\0\
+\0\0\x18\0\0\x01\xbb\0\0\0\x0e\0\0\0\0\0\0\x01\xbe\0\0\0\x11\0\0\0\x20\0\0\x01\
+\xc3\0\0\0\x0e\0\0\0\xa0\0\0\x01\xcf\x08\0\0\0\0\0\0\x0f\0\0\x01\xd5\x01\0\0\0\
+\0\0\0\x04\0\0\0\x20\0\0\x01\xe2\x01\0\0\0\0\0\0\x01\x01\0\0\x08\0\0\0\0\x03\0\
+\0\0\0\0\0\0\0\0\0\x10\0\0\0\x12\0\0\0\x10\0\0\x01\xe7\x01\0\0\0\0\0\0\x04\0\0\
+\0\x20\0\0\0\0\x02\0\0\0\0\0\0\x14\0\0\x02\x4b\x04\0\0\x02\0\0\0\x10\0\0\0\x13\
+\0\0\0\x03\0\0\0\0\0\0\x02\x5e\0\0\0\x15\0\0\0\x40\0\0\0\0\x02\0\0\0\0\0\0\x18\
+\0\0\0\0\x0d\0\0\x01\0\0\0\x06\0\0\0\x1c\0\0\0\x13\0\0\x02\x63\x0c\0\0\x01\0\0\
+\0\x16\0\0\x02\xaf\x04\0\0\x01\0\0\0\x08\0\0\x02\xb8\0\0\0\x19\0\0\0\0\0\0\0\0\
+\x02\0\0\0\0\0\0\x1a\0\0\x03\x09\x04\0\0\x06\0\0\0\x38\0\0\x01\xbb\0\0\0\x0e\0\
+\0\0\0\0\0\x01\xbe\0\0\0\x11\0\0\0\x20\0\0\x03\x16\0\0\0\x1b\0\0\0\xc0\0\0\x03\
+\x27\0\0\0\x15\0\0\x01\0\0\0\x03\x30\0\0\0\x1d\0\0\x01\x40\0\0\x03\x3a\0\0\0\
+\x1e\0\0\x01\x80\0\0\0\0\x02\0\0\0\0\0\0\x1c\0\0\0\0\x0a\0\0\0\0\0\0\x10\0\0\0\
+\0\x02\0\0\0\0\0\0\x1f\0\0\0\0\x02\0\0\0\0\0\0\x20\0\0\x03\x84\x04\0\0\x02\0\0\
+\0\x08\0\0\x03\x92\0\0\0\x0e\0\0\0\0\0\0\x03\x9b\0\0\0\x0e\0\0\0\x20\0\0\x03\
+\x3a\x04\0\0\x03\0\0\0\x18\0\0\x03\xa5\0\0\0\x1b\0\0\0\0\0\0\x03\xad\0\0\0\x21\
+\0\0\0\x40\0\0\x03\xb3\0\0\0\x23\0\0\0\x80\0\0\0\0\x02\0\0\0\0\0\0\x22\0\0\0\0\
+\x02\0\0\0\0\0\0\x24\0\0\x03\xb7\x04\0\0\x01\0\0\0\x04\0\0\x03\xc2\0\0\0\x0e\0\
+\0\0\0\0\0\x04\x2b\x04\0\0\x01\0\0\0\x04\0\0\x04\x34\0\0\0\x0e\0\0\0\0\0\0\0\0\
+\x03\0\0\0\0\0\0\0\0\0\0\x1c\0\0\0\x12\0\0\0\x23\0\0\x04\xaa\x0e\0\0\0\0\0\0\
+\x25\0\0\0\0\0\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\x1c\0\0\0\x12\0\0\0\x0e\0\0\x04\
+\xbe\x0e\0\0\0\0\0\0\x27\0\0\0\0\0\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\x1c\0\0\0\x12\
+\0\0\0\x20\0\0\x04\xd4\x0e\0\0\0\0\0\0\x29\0\0\0\0\0\0\0\0\x03\0\0\0\0\0\0\0\0\
+\0\0\x1c\0\0\0\x12\0\0\0\x11\0\0\x04\xe9\x0e\0\0\0\0\0\0\x2b\0\0\0\0\0\0\0\0\
+\x03\0\0\0\0\0\0\0\0\0\0\x10\0\0\0\x12\0\0\0\x04\0\0\x05\0\x0e\0\0\0\0\0\0\x2d\
+\0\0\0\x01\0\0\x05\x08\x0f\0\0\x04\0\0\0\x62\0\0\0\x26\0\0\0\0\0\0\0\x23\0\0\0\
+\x28\0\0\0\x23\0\0\0\x0e\0\0\0\x2a\0\0\0\x31\0\0\0\x20\0\0\0\x2c\0\0\0\x51\0\0\
+\0\x11\0\0\x05\x10\x0f\0\0\x01\0\0\0\x04\0\0\0\x2e\0\0\0\0\0\0\0\x04\0\x62\x70\
+\x66\x5f\x69\x74\x65\x72\x5f\x5f\x62\x70\x66\x5f\x6d\x61\x70\0\x6d\x65\x74\x61\
+\0\x6d\x61\x70\0\x63\x74\x78\0\x69\x6e\x74\0\x64\x75\x6d\x70\x5f\x62\x70\x66\
+\x5f\x6d\x61\x70\0\x69\x74\x65\x72\x2f\x62\x70\x66\x5f\x6d\x61\x70\0\x30\x3a\
+\x30\0\x2f\x68\x6f\x6d\x65\x2f\x69\x69\x69\x2f\x6c\x69\x6e\x75\x78\x2d\x6b\x65\
+\x72\x6e\x65\x6c\x2d\x74\x6f\x6f\x6c\x63\x68\x61\x69\x6e\x2f\x73\x72\x63\x2f\
+\x6c\x69\x6e\x75\x78\x2f\x6b\x65\x72\x6e\x65\x6c\x2f\x62\x70\x66\x2f\x70\x72\
+\x65\x6c\x6f\x61\x64\x2f\x69\x74\x65\x72\x61\x74\x6f\x72\x73\x2f\x69\x74\x65\
+\x72\x61\x74\x6f\x72\x73\x2e\x62\x70\x66\x2e\x63\0\x09\x73\x74\x72\x75\x63\x74\
+\x20\x73\x65\x71\x5f\x66\x69\x6c\x65\x20\x2a\x73\x65\x71\x20\x3d\x20\x63\x74\
+\x78\x2d\x3e\x6d\x65\x74\x61\x2d\x3e\x73\x65\x71\x3b\0\x62\x70\x66\x5f\x69\x74\
+\x65\x72\x5f\x6d\x65\x74\x61\0\x73\x65\x71\0\x73\x65\x73\x73\x69\x6f\x6e\x5f\
+\x69\x64\0\x73\x65\x71\x5f\x6e\x75\x6d\0\x73\x65\x71\x5f\x66\x69\x6c\x65\0\x5f\
+\x5f\x75\x36\x34\0\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\x6c\x6f\x6e\x67\x20\x6c\
+\x6f\x6e\x67\0\x30\x3a\x31\0\x09\x73\x74\x72\x75\x63\x74\x20\x62\x70\x66\x5f\
+\x6d\x61\x70\x20\x2a\x6d\x61\x70\x20\x3d\x20\x63\x74\x78\x2d\x3e\x6d\x61\x70\
+\x3b\0\x09\x69\x66\x20\x28\x21\x6d\x61\x70\x29\0\x30\x3a\x32\0\x09\x5f\x5f\x75\
+\x36\x34\x20\x73\x65\x71\x5f\x6e\x75\x6d\x20\x3d\x20\x63\x74\x78\x2d\x3e\x6d\
+\x65\x74\x61\x2d\x3e\x73\x65\x71\x5f\x6e\x75\x6d\x3b\0\x09\x69\x66\x20\x28\x73\
+\x65\x71\x5f\x6e\x75\x6d\x20\x3d\x3d\x20\x30\x29\0\x09\x09\x42\x50\x46\x5f\x53\
+\x45\x51\x5f\x50\x52\x49\x4e\x54\x46\x28\x73\x65\x71\x2c\x20\x22\x20\x20\x69\
+\x64\x20\x6e\x61\x6d\x65\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x6d\x61\x78\x5f\x65\x6e\x74\x72\x69\x65\x73\x5c\x6e\x22\x29\x3b\0\x62\x70\x66\
+\x5f\x6d\x61\x70\0\x69\x64\0\x6e\x61\x6d\x65\0\x6d\x61\x78\x5f\x65\x6e\x74\x72\
+\x69\x65\x73\0\x5f\x5f\x75\x33\x32\0\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\x69\
+\x6e\x74\0\x63\x68\x61\x72\0\x5f\x5f\x41\x52\x52\x41\x59\x5f\x53\x49\x5a\x45\
+\x5f\x54\x59\x50\x45\x5f\x5f\0\x09\x42\x50\x46\x5f\x53\x45\x51\x5f\x50\x52\x49\
+\x4e\x54\x46\x28\x73\x65\x71\x2c\x20\x22\x25\x34\x75\x20\x25\x2d\x31\x36\x73\
+\x25\x36\x64\x5c\x6e\x22\x2c\x20\x6d\x61\x70\x2d\x3e\x69\x64\x2c\x20\x6d\x61\
+\x70\x2d\x3e\x6e\x61\x6d\x65\x2c\x20\x6d\x61\x70\x2d\x3e\x6d\x61\x78\x5f\x65\
+\x6e\x74\x72\x69\x65\x73\x29\x3b\0\x7d\0\x62\x70\x66\x5f\x69\x74\x65\x72\x5f\
+\x5f\x62\x70\x66\x5f\x70\x72\x6f\x67\0\x70\x72\x6f\x67\0\x64\x75\x6d\x70\x5f\
+\x62\x70\x66\x5f\x70\x72\x6f\x67\0\x69\x74\x65\x72\x2f\x62\x70\x66\x5f\x70\x72\
+\x6f\x67\0\x09\x73\x74\x72\x75\x63\x74\x20\x62\x70\x66\x5f\x70\x72\x6f\x67\x20\
+\x2a\x70\x72\x6f\x67\x20\x3d\x20\x63\x74\x78\x2d\x3e\x70\x72\x6f\x67\x3b\0\x09\
+\x69\x66\x20\x28\x21\x70\x72\x6f\x67\x29\0\x62\x70\x66\x5f\x70\x72\x6f\x67\0\
+\x61\x75\x78\0\x09\x61\x75\x78\x20\x3d\x20\x70\x72\x6f\x67\x2d\x3e\x61\x75\x78\
+\x3b\0\x09\x09\x42\x50\x46\x5f\x53\x45\x51\x5f\x50\x52\x49\x4e\x54\x46\x28\x73\
+\x65\x71\x2c\x20\x22\x20\x20\x69\x64\x20\x6e\x61\x6d\x65\x20\x20\x20\x20\x20\
+\x20\x20\x20\x20\x20\x20\x20\x20\x61\x74\x74\x61\x63\x68\x65\x64\x5c\x6e\x22\
+\x29\x3b\0\x62\x70\x66\x5f\x70\x72\x6f\x67\x5f\x61\x75\x78\0\x61\x74\x74\x61\
+\x63\x68\x5f\x66\x75\x6e\x63\x5f\x6e\x61\x6d\x65\0\x64\x73\x74\x5f\x70\x72\x6f\
+\x67\0\x66\x75\x6e\x63\x5f\x69\x6e\x66\x6f\0\x62\x74\x66\0\x09\x42\x50\x46\x5f\
+\x53\x45\x51\x5f\x50\x52\x49\x4e\x54\x46\x28\x73\x65\x71\x2c\x20\x22\x25\x34\
+\x75\x20\x25\x2d\x31\x36\x73\x20\x25\x73\x20\x25\x73\x5c\x6e\x22\x2c\x20\x61\
+\x75\x78\x2d\x3e\x69\x64\x2c\0\x30\x3a\x34\0\x30\x3a\x35\0\x09\x69\x66\x20\x28\
+\x21\x62\x74\x66\x29\0\x62\x70\x66\x5f\x66\x75\x6e\x63\x5f\x69\x6e\x66\x6f\0\
+\x69\x6e\x73\x6e\x5f\x6f\x66\x66\0\x74\x79\x70\x65\x5f\x69\x64\0\x30\0\x73\x74\
+\x72\x69\x6e\x67\x73\0\x74\x79\x70\x65\x73\0\x68\x64\x72\0\x62\x74\x66\x5f\x68\
+\x65\x61\x64\x65\x72\0\x73\x74\x72\x5f\x6c\x65\x6e\0\x09\x74\x79\x70\x65\x73\
+\x20\x3d\x20\x62\x74\x66\x2d\x3e\x74\x79\x70\x65\x73\x3b\0\x09\x62\x70\x66\x5f\
+\x70\x72\x6f\x62\x65\x5f\x72\x65\x61\x64\x5f\x6b\x65\x72\x6e\x65\x6c\x28\x26\
+\x74\x2c\x20\x73\x69\x7a\x65\x6f\x66\x28\x74\x29\x2c\x20\x74\x79\x70\x65\x73\
+\x20\x2b\x20\x62\x74\x66\x5f\x69\x64\x29\x3b\0\x09\x73\x74\x72\x20\x3d\x20\x62\
+\x74\x66\x2d\x3e\x73\x74\x72\x69\x6e\x67\x73\x3b\0\x62\x74\x66\x5f\x74\x79\x70\
+\x65\0\x6e\x61\x6d\x65\x5f\x6f\x66\x66\0\x09\x6e\x61\x6d\x65\x5f\x6f\x66\x66\
+\x20\x3d\x20\x42\x50\x46\x5f\x43\x4f\x52\x45\x5f\x52\x45\x41\x44\x28\x74\x2c\
+\x20\x6e\x61\x6d\x65\x5f\x6f\x66\x66\x29\x3b\0\x30\x3a\x32\x3a\x30\0\x09\x69\
+\x66\x20\x28\x6e\x61\x6d\x65\x5f\x6f\x66\x66\x20\x3e\x3d\x20\x62\x74\x66\x2d\
+\x3e\x68\x64\x72\x2e\x73\x74\x72\x5f\x6c\x65\x6e\x29\0\x09\x72\x65\x74\x75\x72\
+\x6e\x20\x73\x74\x72\x20\x2b\x20\x6e\x61\x6d\x65\x5f\x6f\x66\x66\x3b\0\x30\x3a\
+\x33\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x6d\x61\x70\x2e\x5f\x5f\x5f\x66\x6d\
+\x74\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x6d\x61\x70\x2e\x5f\x5f\x5f\x66\x6d\
+\x74\x2e\x31\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x70\x72\x6f\x67\x2e\x5f\x5f\
+\x5f\x66\x6d\x74\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x70\x72\x6f\x67\x2e\x5f\
+\x5f\x5f\x66\x6d\x74\x2e\x32\0\x4c\x49\x43\x45\x4e\x53\x45\0\x2e\x72\x6f\x64\
+\x61\x74\x61\0\x6c\x69\x63\x65\x6e\x73\x65\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\x09\x4c\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x02\0\0\0\x04\0\0\0\x62\0\0\0\
+\x01\0\0\0\x80\0\0\0\0\0\0\0\0\x69\x74\x65\x72\x61\x74\x6f\x72\x2e\x72\x6f\x64\
+\x61\x74\x61\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x2f\0\0\0\0\0\0\0\0\0\0\0\0\x20\
+\x20\x69\x64\x20\x6e\x61\x6d\x65\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x20\x20\x6d\x61\x78\x5f\x65\x6e\x74\x72\x69\x65\x73\x0a\0\x25\x34\x75\x20\x25\
+\x2d\x31\x36\x73\x25\x36\x64\x0a\0\x20\x20\x69\x64\x20\x6e\x61\x6d\x65\x20\x20\
+\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x61\x74\x74\x61\x63\x68\x65\x64\
+\x0a\0\x25\x34\x75\x20\x25\x2d\x31\x36\x73\x20\x25\x73\x20\x25\x73\x0a\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\x47\x50\x4c\0\0\0\0\0\x79\x21\0\0\0\0\0\0\x79\x62\0\0\
+\0\0\0\0\x79\x71\0\x08\0\0\0\0\x15\x70\0\x1a\0\0\0\0\x79\x12\0\x10\0\0\0\0\x55\
+\x10\0\x08\0\0\0\0\xbf\x4a\0\0\0\0\0\0\x07\x40\0\0\xff\xff\xff\xe8\xbf\x16\0\0\
+\0\0\0\0\x18\x26\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xb7\x30\0\0\0\0\0\x23\xb7\x50\0\0\
+\0\0\0\0\x85\0\0\0\0\0\0\x7e\x61\x17\0\0\0\0\0\0\x7b\xa1\xff\xe8\0\0\0\0\xb7\
+\x10\0\0\0\0\0\x04\xbf\x27\0\0\0\0\0\0\x0f\x21\0\0\0\0\0\0\x7b\xa2\xff\xf0\0\0\
+\0\0\x61\x17\0\x14\0\0\0\0\x7b\xa1\xff\xf8\0\0\0\0\xbf\x4a\0\0\0\0\0\0\x07\x40\
+\0\0\xff\xff\xff\xe8\xbf\x16\0\0\0\0\0\0\x18\x26\0\0\0\0\0\0\0\0\0\0\0\0\0\x23\
+\xb7\x30\0\0\0\0\0\x0e\xb7\x50\0\0\0\0\0\x18\x85\0\0\0\0\0\0\x7e\xb7\0\0\0\0\0\
+\0\0\x95\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x07\0\0\0\0\0\0\0\x42\0\0\0\x9a\0\x01\x3c\
+\x1e\0\0\0\x01\0\0\0\x42\0\0\0\x9a\0\x01\x3c\x24\0\0\0\x02\0\0\0\x42\0\0\x01\
+\x0d\0\x01\x44\x1d\0\0\0\x03\0\0\0\x42\0\0\x01\x2e\0\x01\x4c\x06\0\0\0\x04\0\0\
+\0\x42\0\0\x01\x3d\0\x01\x40\x1d\0\0\0\x05\0\0\0\x42\0\0\x01\x62\0\x01\x58\x06\
+\0\0\0\x07\0\0\0\x42\0\0\x01\x75\0\x01\x5c\x03\0\0\0\x0e\0\0\0\x42\0\0\x01\xfb\
+\0\x01\x64\x02\0\0\0\x1e\0\0\0\x42\0\0\x02\x49\0\x01\x6c\x01\0\0\0\0\0\0\0\x02\
+\0\0\0\x3e\0\0\0\0\0\0\0\x08\0\0\0\x08\0\0\0\x3e\0\0\0\0\0\0\0\x10\0\0\0\x02\0\
+\0\x01\x09\0\0\0\0\0\0\0\x20\0\0\0\x08\0\0\x01\x39\0\0\0\0\0\0\0\x70\0\0\0\x0d\
+\0\0\0\x3e\0\0\0\0\0\0\0\x80\0\0\0\x0d\0\0\x01\x09\0\0\0\0\0\0\0\xa0\0\0\0\x0d\
+\0\0\x01\x39\0\0\0\0\0\0\0\x1a\0\0\0\x20\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\
+\x6d\x61\x70\0\0\0\0\0\0\0\0\0\0\0\x1c\0\0\0\0\0\0\0\x08\0\0\0\0\0\0\0\0\0\0\0\
+\x01\0\0\0\x10\0\0\0\0\0\0\0\0\0\0\0\x09\0\0\0\x01\0\0\0\0\0\0\0\x07\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x10\0\0\0\0\x62\x70\x66\x5f\x69\x74\x65\x72\x5f\
+\x62\x70\x66\x5f\x6d\x61\x70\0\0\0\0\0\0\0\0\x47\x50\x4c\0\0\0\0\0\x79\x21\0\0\
+\0\0\0\0\x79\x62\0\0\0\0\0\0\x79\x11\0\x08\0\0\0\0\x15\x10\0\x3b\0\0\0\0\x79\
+\x71\0\0\0\0\0\0\x79\x12\0\x10\0\0\0\0\x55\x10\0\x08\0\0\0\0\xbf\x4a\0\0\0\0\0\
+\0\x07\x40\0\0\xff\xff\xff\xd0\xbf\x16\0\0\0\0\0\0\x18\x26\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\x31\xb7\x30\0\0\0\0\0\x20\xb7\x50\0\0\0\0\0\0\x85\0\0\0\0\0\0\x7e\x7b\
+\xa6\xff\xc8\0\0\0\0\x61\x17\0\0\0\0\0\0\x7b\xa1\xff\xd0\0\0\0\0\xb7\x30\0\0\0\
+\0\0\x04\xbf\x97\0\0\0\0\0\0\x0f\x93\0\0\0\0\0\0\x79\x17\0\x28\0\0\0\0\x79\x87\
+\0\x30\0\0\0\0\x15\x80\0\x18\0\0\0\0\xb7\x20\0\0\0\0\0\0\x0f\x12\0\0\0\0\0\0\
+\x61\x11\0\x04\0\0\0\0\x79\x38\0\x08\0\0\0\0\x67\x10\0\0\0\0\0\x03\x0f\x31\0\0\
+\0\0\0\0\x79\x68\0\0\0\0\0\0\xbf\x1a\0\0\0\0\0\0\x07\x10\0\0\xff\xff\xff\xf8\
+\xb7\x20\0\0\0\0\0\x08\x85\0\0\0\0\0\0\x71\xb7\x10\0\0\0\0\0\0\x79\x3a\xff\xf8\
+\0\0\0\0\x0f\x31\0\0\0\0\0\0\xbf\x1a\0\0\0\0\0\0\x07\x10\0\0\xff\xff\xff\xf4\
+\xb7\x20\0\0\0\0\0\x04\x85\0\0\0\0\0\0\x71\xb7\x30\0\0\0\0\0\x04\x61\x1a\xff\
+\xf4\0\0\0\0\x61\x28\0\x10\0\0\0\0\x3d\x12\0\x02\0\0\0\0\x0f\x61\0\0\0\0\0\0\
+\xbf\x96\0\0\0\0\0\0\x7b\xa9\xff\xd8\0\0\0\0\x79\x17\0\x18\0\0\0\0\x7b\xa1\xff\
+\xe0\0\0\0\0\x79\x17\0\x20\0\0\0\0\x79\x11\0\0\0\0\0\0\x0f\x13\0\0\0\0\0\0\x7b\
+\xa1\xff\xe8\0\0\0\0\xbf\x4a\0\0\0\0\0\0\x07\x40\0\0\xff\xff\xff\xd0\x79\x1a\
+\xff\xc8\0\0\0\0\x18\x26\0\0\0\0\0\0\0\0\0\0\0\0\0\x51\xb7\x30\0\0\0\0\0\x11\
+\xb7\x50\0\0\0\0\0\x20\x85\0\0\0\0\0\0\x7e\xb7\0\0\0\0\0\0\0\x95\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\x17\0\0\0\0\0\0\0\x42\0\0\0\x9a\0\x01\x80\x1e\0\0\0\x01\0\0\0\
+\x42\0\0\0\x9a\0\x01\x80\x24\0\0\0\x02\0\0\0\x42\0\0\x02\x7f\0\x01\x88\x1f\0\0\
+\0\x03\0\0\0\x42\0\0\x02\xa3\0\x01\x94\x06\0\0\0\x04\0\0\0\x42\0\0\x02\xbc\0\
+\x01\xa0\x0e\0\0\0\x05\0\0\0\x42\0\0\x01\x3d\0\x01\x84\x1d\0\0\0\x06\0\0\0\x42\
+\0\0\x01\x62\0\x01\xa4\x06\0\0\0\x08\0\0\0\x42\0\0\x02\xce\0\x01\xa8\x03\0\0\0\
+\x10\0\0\0\x42\0\0\x03\x3e\0\x01\xb0\x02\0\0\0\x17\0\0\0\x42\0\0\x03\x79\0\x01\
+\x04\x06\0\0\0\x1a\0\0\0\x42\0\0\x03\x3e\0\x01\xb0\x02\0\0\0\x1b\0\0\0\x42\0\0\
+\x03\xca\0\x01\x10\x0f\0\0\0\x1c\0\0\0\x42\0\0\x03\xdf\0\x01\x14\x2d\0\0\0\x1e\
+\0\0\0\x42\0\0\x04\x16\0\x01\x0c\x0d\0\0\0\x20\0\0\0\x42\0\0\x03\x3e\0\x01\xb0\
+\x02\0\0\0\x21\0\0\0\x42\0\0\x03\xdf\0\x01\x14\x02\0\0\0\x24\0\0\0\x42\0\0\x04\
+\x3d\0\x01\x18\x0d\0\0\0\x27\0\0\0\x42\0\0\x03\x3e\0\x01\xb0\x02\0\0\0\x28\0\0\
+\0\x42\0\0\x04\x3d\0\x01\x18\x0d\0\0\0\x2b\0\0\0\x42\0\0\x04\x3d\0\x01\x18\x0d\
+\0\0\0\x2c\0\0\0\x42\0\0\x04\x6b\0\x01\x1c\x1b\0\0\0\x2d\0\0\0\x42\0\0\x04\x6b\
+\0\x01\x1c\x06\0\0\0\x2e\0\0\0\x42\0\0\x04\x8e\0\x01\x24\x0d\0\0\0\x30\0\0\0\
+\x42\0\0\x03\x3e\0\x01\xb0\x02\0\0\0\x3f\0\0\0\x42\0\0\x02\x49\0\x01\xc0\x01\0\
+\0\0\0\0\0\0\x14\0\0\0\x3e\0\0\0\0\0\0\0\x08\0\0\0\x08\0\0\0\x3e\0\0\0\0\0\0\0\
+\x10\0\0\0\x14\0\0\x01\x09\0\0\0\0\0\0\0\x20\0\0\0\x18\0\0\0\x3e\0\0\0\0\0\0\0\
+\x28\0\0\0\x08\0\0\x01\x39\0\0\0\0\0\0\0\x80\0\0\0\x1a\0\0\0\x3e\0\0\0\0\0\0\0\
+\x90\0\0\0\x1a\0\0\x01\x09\0\0\0\0\0\0\0\xa8\0\0\0\x1a\0\0\x03\x71\0\0\0\0\0\0\
+\0\xb0\0\0\0\x1a\0\0\x03\x75\0\0\0\0\0\0\0\xc0\0\0\0\x1f\0\0\x03\xa3\0\0\0\0\0\
+\0\0\xd8\0\0\0\x20\0\0\x01\x09\0\0\0\0\0\0\0\xf0\0\0\0\x20\0\0\0\x3e\0\0\0\0\0\
+\0\x01\x18\0\0\0\x24\0\0\0\x3e\0\0\0\0\0\0\x01\x50\0\0\0\x1a\0\0\x01\x09\0\0\0\
+\0\0\0\x01\x60\0\0\0\x20\0\0\x04\x65\0\0\0\0\0\0\x01\x88\0\0\0\x1a\0\0\x01\x39\
+\0\0\0\0\0\0\x01\x98\0\0\0\x1a\0\0\x04\xa6\0\0\0\0\0\0\x01\xa0\0\0\0\x18\0\0\0\
+\x3e\0\0\0\0\0\0\0\x1a\0\0\0\x41\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x70\x72\
+\x6f\x67\0\0\0\0\0\0\0\0\0\0\x1c\0\0\0\0\0\0\0\x08\0\0\0\0\0\0\0\0\0\0\0\x01\0\
+\0\0\x10\0\0\0\0\0\0\0\0\0\0\0\x19\0\0\0\x01\0\0\0\0\0\0\0\x12\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\x10\0\0\0\0\x62\x70\x66\x5f\x69\x74\x65\x72\x5f\x62\x70\
+\x66\x5f\x70\x72\x6f\x67\0\0\0\0\0\0\0";
+ opts.insns_sz = 2216;
+ opts.insns = (void *)"\
+\xbf\x61\0\0\0\0\0\0\xbf\x1a\0\0\0\0\0\0\x07\x10\0\0\xff\xff\xff\x78\xb7\x20\0\
+\0\0\0\0\x88\xb7\x30\0\0\0\0\0\0\x85\0\0\0\0\0\0\x71\x05\0\0\x14\0\0\0\0\x61\
+\x1a\xff\x78\0\0\0\0\xd5\x10\0\x01\0\0\0\0\x85\0\0\0\0\0\0\xa8\x61\x1a\xff\x7c\
+\0\0\0\0\xd5\x10\0\x01\0\0\0\0\x85\0\0\0\0\0\0\xa8\x61\x1a\xff\x80\0\0\0\0\xd5\
+\x10\0\x01\0\0\0\0\x85\0\0\0\0\0\0\xa8\x61\x1a\xff\x84\0\0\0\0\xd5\x10\0\x01\0\
+\0\0\0\x85\0\0\0\0\0\0\xa8\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x61\x10\0\0\0\0\
+\0\0\xd5\x10\0\x02\0\0\0\0\xbf\x91\0\0\0\0\0\0\x85\0\0\0\0\0\0\xa8\xbf\x07\0\0\
+\0\0\0\0\x95\0\0\0\0\0\0\0\x61\x06\0\x08\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\
+\0\x0e\x68\x63\x10\0\0\0\0\0\0\x61\x06\0\x0c\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\
+\0\0\0\x0e\x64\x63\x10\0\0\0\0\0\0\x79\x06\0\x10\0\0\0\0\x18\x16\0\0\0\0\0\0\0\
+\0\0\0\0\0\x0e\x58\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x05\0\
+\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x0e\x50\x7b\x10\0\0\0\0\0\0\xb7\x10\0\0\0\0\0\
+\x12\x18\x26\0\0\0\0\0\0\0\0\0\0\0\0\x0e\x50\xb7\x30\0\0\0\0\0\x1c\x85\0\0\0\0\
+\0\0\xa6\xbf\x70\0\0\0\0\0\0\xc5\x70\xff\xd4\0\0\0\0\x63\xa7\xff\x78\0\0\0\0\
+\x61\x0a\xff\x78\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x0e\xa0\x63\x10\0\0\0\
+\0\0\0\x61\x06\0\x1c\0\0\0\0\x15\0\0\x03\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\
+\0\x0e\x7c\x63\x10\0\0\0\0\0\0\xb7\x10\0\0\0\0\0\0\x18\x26\0\0\0\0\0\0\0\0\0\0\
+\0\0\x0e\x70\xb7\x30\0\0\0\0\0\x48\x85\0\0\0\0\0\0\xa6\xbf\x70\0\0\0\0\0\0\xc5\
+\x70\xff\xc3\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x63\x17\0\0\0\0\0\0\
+\x79\x36\0\x20\0\0\0\0\x15\x30\0\x08\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\
+\x0e\xb8\xb7\x20\0\0\0\0\0\x62\x61\x06\0\x04\0\0\0\0\x45\0\0\x02\0\0\0\x01\x85\
+\0\0\0\0\0\0\x94\x05\0\0\x01\0\0\0\0\x85\0\0\0\0\0\0\x71\x18\x26\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\x61\x02\0\0\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x0f\x28\x63\
+\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x0f\x20\x18\x16\0\0\0\0\0\0\0\
+\0\0\0\0\0\x0f\x30\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x0e\xb8\
+\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x0f\x38\x7b\x10\0\0\0\0\0\0\xb7\x10\0\0\0\0\0\
+\x02\x18\x26\0\0\0\0\0\0\0\0\0\0\0\0\x0f\x28\xb7\x30\0\0\0\0\0\x20\x85\0\0\0\0\
+\0\0\xa6\xbf\x70\0\0\0\0\0\0\xc5\x70\xff\x9f\0\0\0\0\x18\x26\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\x61\x02\0\0\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x0f\x48\x63\x10\
+\0\0\0\0\0\0\xb7\x10\0\0\0\0\0\x16\x18\x26\0\0\0\0\0\0\0\0\0\0\0\0\x0f\x48\xb7\
+\x30\0\0\0\0\0\x04\x85\0\0\0\0\0\0\xa6\xbf\x70\0\0\0\0\0\0\xc5\x70\xff\x92\0\0\
+\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x0f\x50\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\
+\x11\x70\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x0f\x58\x18\x16\0\
+\0\0\0\0\0\0\0\0\0\0\0\x11\x68\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\
+\0\0\x10\x58\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x11\xb0\x7b\x10\0\0\0\0\0\0\x18\
+\x06\0\0\0\0\0\0\0\0\0\0\0\0\x10\x60\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x11\xc0\
+\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x10\xf0\x18\x16\0\0\0\0\0\
+\0\0\0\0\0\0\0\x11\xe0\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x11\xd8\x7b\x10\0\0\0\0\0\0\x61\x06\0\x08\0\0\
+\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x11\x78\x63\x10\0\0\0\0\0\0\x61\x06\0\x0c\
+\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x11\x7c\x63\x10\0\0\0\0\0\0\x79\x06\0\
+\x10\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x11\x80\x7b\x10\0\0\0\0\0\0\x61\
+\x0a\xff\x78\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x11\xa8\x63\x10\0\0\0\0\0\
+\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x11\xf0\xb7\x20\0\0\0\0\0\x11\xb7\x30\0\0\0\
+\0\0\x0c\xb7\x40\0\0\0\0\0\0\x85\0\0\0\0\0\0\xa7\xbf\x70\0\0\0\0\0\0\xc5\x70\
+\xff\x5c\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x11\x60\x63\x07\0\x6c\0\0\0\0\
+\x77\x70\0\0\0\0\0\x20\x63\x07\0\x70\0\0\0\0\xb7\x10\0\0\0\0\0\x05\x18\x26\0\0\
+\0\0\0\0\0\0\0\0\0\0\x11\x60\xb7\x30\0\0\0\0\0\x8c\x85\0\0\0\0\0\0\xa6\xbf\x70\
+\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x11\xd0\x61\x10\0\0\0\0\0\0\xd5\
+\x10\0\x02\0\0\0\0\xbf\x91\0\0\0\0\0\0\x85\0\0\0\0\0\0\xa8\xc5\x70\xff\x4a\0\0\
+\0\0\x63\xa7\xff\x80\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x12\x08\x18\x16\0\
+\0\0\0\0\0\0\0\0\0\0\0\x16\xe0\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\
+\0\0\x12\x10\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x16\xd8\x7b\x10\0\0\0\0\0\0\x18\
+\x06\0\0\0\0\0\0\0\0\0\0\0\0\x14\x18\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x17\x20\
+\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x14\x20\x18\x16\0\0\0\0\0\
+\0\0\0\0\0\0\0\x17\x30\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x15\
+\xb0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x17\x50\x7b\x10\0\0\0\0\0\0\x18\x06\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x17\x48\x7b\x10\0\0\0\0\
+\0\0\x61\x06\0\x08\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x16\xe8\x63\x10\0\0\
+\0\0\0\0\x61\x06\0\x0c\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x16\xec\x63\x10\
+\0\0\0\0\0\0\x79\x06\0\x10\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x16\xf0\x7b\
+\x10\0\0\0\0\0\0\x61\x0a\xff\x78\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x17\
+\x18\x63\x10\0\0\0\0\0\0\x18\x16\0\0\0\0\0\0\0\0\0\0\0\0\x17\x60\xb7\x20\0\0\0\
+\0\0\x12\xb7\x30\0\0\0\0\0\x0c\xb7\x40\0\0\0\0\0\0\x85\0\0\0\0\0\0\xa7\xbf\x70\
+\0\0\0\0\0\0\xc5\x70\xff\x13\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x16\xd0\
+\x63\x07\0\x6c\0\0\0\0\x77\x70\0\0\0\0\0\x20\x63\x07\0\x70\0\0\0\0\xb7\x10\0\0\
+\0\0\0\x05\x18\x26\0\0\0\0\0\0\0\0\0\0\0\0\x16\xd0\xb7\x30\0\0\0\0\0\x8c\x85\0\
+\0\0\0\0\0\xa6\xbf\x70\0\0\0\0\0\0\x18\x06\0\0\0\0\0\0\0\0\0\0\0\0\x17\x40\x61\
+\x10\0\0\0\0\0\0\xd5\x10\0\x02\0\0\0\0\xbf\x91\0\0\0\0\0\0\x85\0\0\0\0\0\0\xa8\
+\xc5\x70\xff\x01\0\0\0\0\x63\xa7\xff\x84\0\0\0\0\x61\x1a\xff\x78\0\0\0\0\xd5\
+\x10\0\x02\0\0\0\0\xbf\x91\0\0\0\0\0\0\x85\0\0\0\0\0\0\xa8\x61\x0a\xff\x80\0\0\
+\0\0\x63\x60\0\x28\0\0\0\0\x61\x0a\xff\x84\0\0\0\0\x63\x60\0\x2c\0\0\0\0\x18\
+\x16\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x61\x01\0\0\0\0\0\0\x63\x60\0\x18\0\0\0\0\xb7\
+\0\0\0\0\0\0\0\x95\0\0\0\0\0\0\0";
+ err = bpf_load_and_run(&opts);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
+static inline struct iterators_bpf *
+iterators_bpf__open_and_load(void)
+{
+ struct iterators_bpf *skel;
+
+ skel = iterators_bpf__open();
+ if (!skel)
+ return NULL;
+ if (iterators_bpf__load(skel)) {
+ iterators_bpf__destroy(skel);
+ return NULL;
+ }
+ return skel;
+}
+
+__attribute__((unused)) static void
+iterators_bpf__assert(struct iterators_bpf *s __attribute__((unused)))
+{
+#ifdef __cplusplus
+#define _Static_assert static_assert
+#endif
+#ifdef __cplusplus
+#undef _Static_assert
+#endif
+}
+
+#endif /* __ITERATORS_BPF_SKEL_H__ */
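
The generated header above is self-contained: open/load/attach/destroy are all static inlines over the embedded loader program. A minimal usage sketch, loosely following kernel/bpf/preload/bpf_preload_kern.c (error handling abbreviated):

/* Usage sketch of the light-skeleton API above, loosely following
 * kernel/bpf/preload/bpf_preload_kern.c; error handling abbreviated.
 */
static struct iterators_bpf *skel;

static int load_iterators_skel(void)
{
	int err;

	skel = iterators_bpf__open_and_load();
	if (!skel)
		return -ENOMEM;
	err = iterators_bpf__attach(skel);
	if (err) {
		iterators_bpf__destroy(skel);
		skel = NULL;
	}
	return err;
}
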
diff --git a/kernel/bpf/preload/iterators/iterators.lskel.h b/kernel/bpf/preload/iterators/iterators.lskel-little-endian.h
index 70f236a82fe1..70f236a82fe1 100644
--- a/kernel/bpf/preload/iterators/iterators.lskel.h
+++ b/kernel/bpf/preload/iterators/iterators.lskel-little-endian.h
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 99417b387547..bcc97613de76 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -390,7 +390,7 @@ static int bpf_map_alloc_id(struct bpf_map *map)
return id > 0 ? 0 : id;
}
-void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
+void bpf_map_free_id(struct bpf_map *map)
{
unsigned long flags;
@@ -402,18 +402,12 @@ void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
if (!map->id)
return;
- if (do_idr_lock)
- spin_lock_irqsave(&map_idr_lock, flags);
- else
- __acquire(&map_idr_lock);
+ spin_lock_irqsave(&map_idr_lock, flags);
idr_remove(&map_idr, map->id);
map->id = 0;
- if (do_idr_lock)
- spin_unlock_irqrestore(&map_idr_lock, flags);
- else
- __release(&map_idr_lock);
+ spin_unlock_irqrestore(&map_idr_lock, flags);
}
#ifdef CONFIG_MEMCG_KMEM
@@ -706,13 +700,13 @@ static void bpf_map_put_uref(struct bpf_map *map)
}
/* decrement map refcnt and schedule it for freeing via workqueue
- * (unrelying map implementation ops->map_free() might sleep)
+ * (underlying map implementation ops->map_free() might sleep)
*/
-static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
+void bpf_map_put(struct bpf_map *map)
{
if (atomic64_dec_and_test(&map->refcnt)) {
/* bpf_map_free_id() must be called first */
- bpf_map_free_id(map, do_idr_lock);
+ bpf_map_free_id(map);
btf_put(map->btf);
INIT_WORK(&map->work, bpf_map_free_deferred);
/* Avoid spawning kworkers, since they all might contend
@@ -721,11 +715,6 @@ static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
queue_work(system_unbound_wq, &map->work);
}
}
-
-void bpf_map_put(struct bpf_map *map)
-{
- __bpf_map_put(map, true);
-}
EXPORT_SYMBOL_GPL(bpf_map_put);
void bpf_map_put_with_uref(struct bpf_map *map)
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 793ecff29038..831f1f472bb8 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -26,7 +26,7 @@ static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
* rstat_cpu->updated_children list. See the comment on top of
* cgroup_rstat_cpu definition for details.
*/
-void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
+__bpf_kfunc void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
{
raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
unsigned long flags;
@@ -231,7 +231,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
*
* This function may block.
*/
-void cgroup_rstat_flush(struct cgroup *cgrp)
+__bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
{
might_sleep();
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 969e8f52f7da..b1cf259854ca 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/btf.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
@@ -975,7 +976,7 @@ void __noclone __crash_kexec(struct pt_regs *regs)
}
STACK_FRAME_NON_STANDARD(__crash_kexec);
-void crash_kexec(struct pt_regs *regs)
+__bpf_kfunc void crash_kexec(struct pt_regs *regs)
{
int old_cpu, this_cpu;
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index d1313435af2b..c58baf9983cc 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1237,7 +1237,7 @@ __diag_ignore_all("-Wmissing-prototypes",
* Return: a bpf_key pointer with a valid key pointer if the key is found, a
* NULL pointer otherwise.
*/
-struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
+__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
{
key_ref_t key_ref;
struct bpf_key *bkey;
@@ -1286,7 +1286,7 @@ struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
* Return: a bpf_key pointer with an invalid key pointer set from the
* pre-determined ID on success, a NULL pointer otherwise
*/
-struct bpf_key *bpf_lookup_system_key(u64 id)
+__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
{
struct bpf_key *bkey;
@@ -1310,7 +1310,7 @@ struct bpf_key *bpf_lookup_system_key(u64 id)
* Decrement the reference count of the key inside *bkey*, if the pointer
* is valid, and free *bkey*.
*/
-void bpf_key_put(struct bpf_key *bkey)
+__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
{
if (bkey->has_ref)
key_put(bkey->key);
@@ -1330,7 +1330,7 @@ void bpf_key_put(struct bpf_key *bkey)
*
* Return: 0 on success, a negative value on error.
*/
-int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
+__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
struct bpf_dynptr_kern *sig_ptr,
struct bpf_key *trusted_keyring)
{
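
bpf_lookup_user_key(), bpf_lookup_system_key(), bpf_key_put() and bpf_verify_pkcs7_signature() above are callable from sleepable BPF LSM programs. A hypothetical caller, with declarations mirroring the kernel signatures (the key serial below is a made-up placeholder):

/* Hypothetical sleepable BPF LSM caller for the key kfuncs annotated
 * above; the key serial is a placeholder, not a real key.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
void bpf_key_put(struct bpf_key *bkey) __ksym;

SEC("lsm.s/bpf")
int BPF_PROG(check_bpf, int cmd, union bpf_attr *attr, unsigned int size)
{
	struct bpf_key *bkey;

	bkey = bpf_lookup_user_key(0x100 /* placeholder serial */, 0);
	if (!bkey)
		return 0;
	/* ... inspect bkey, or feed it to bpf_verify_pkcs7_signature() ... */
	bpf_key_put(bkey);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
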
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 2b954326894f..b766a84c8536 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -484,7 +484,7 @@ out:
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
"Global functions as their definitions will be in vmlinux BTF");
-int noinline bpf_fentry_test1(int a)
+__bpf_kfunc int bpf_fentry_test1(int a)
{
return a + 1;
}
@@ -529,27 +529,35 @@ int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
return (long)arg->a;
}
-int noinline bpf_modify_return_test(int a, int *b)
+__bpf_kfunc int bpf_modify_return_test(int a, int *b)
{
*b += 1;
return a + *b;
}
-u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
+__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
return a + b + c + d;
}
-int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
+__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
return a + b;
}
-struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
+__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
return sk;
}
+long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
+{
+ /* Provoke the compiler to assume that the caller has sign-extended a,
+ * b and c on platforms where this is required (e.g. s390x).
+ */
+ return (long)a + (long)b + (long)c + d;
+}
+
struct prog_test_member1 {
int a;
};
@@ -574,21 +582,21 @@ static struct prog_test_ref_kfunc prog_test_struct = {
.cnt = REFCOUNT_INIT(1),
};
-noinline struct prog_test_ref_kfunc *
+__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
refcount_inc(&prog_test_struct.cnt);
return &prog_test_struct;
}
-noinline struct prog_test_member *
+__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
WARN_ON_ONCE(1);
return NULL;
}
-noinline void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
+__bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
{
if (!p)
return;
@@ -596,11 +604,11 @@ noinline void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
refcount_dec(&p->cnt);
}
-noinline void bpf_kfunc_call_memb_release(struct prog_test_member *p)
+__bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
{
}
-noinline void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
+__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
WARN_ON_ONCE(1);
}
@@ -613,12 +621,14 @@ static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const i
return (int *)p;
}
-noinline int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size)
+__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
+ const int rdwr_buf_size)
{
return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}
-noinline int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size)
+__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
+ const int rdonly_buf_size)
{
return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}
@@ -628,16 +638,17 @@ noinline int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
* Acquire functions must return struct pointers, so these ones are
* failing.
*/
-noinline int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size)
+__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
+ const int rdonly_buf_size)
{
return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}
-noinline void bpf_kfunc_call_int_mem_release(int *p)
+__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}
-noinline struct prog_test_ref_kfunc *
+__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b)
{
struct prog_test_ref_kfunc *p = READ_ONCE(*pp);
@@ -686,48 +697,53 @@ struct prog_test_fail3 {
char arr2[];
};
-noinline void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
+__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
+{
+}
+
+__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}
-noinline void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
+__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}
-noinline void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
+__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}
-noinline void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
+__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}
-noinline void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
+__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}
-noinline void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
+__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}
-noinline void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
+__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}
-noinline void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
+__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}
-noinline void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
+__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
}
-noinline void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
+__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}
-noinline void bpf_kfunc_call_test_destructive(void)
+__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
+ return arg;
}
__diag_pop();
@@ -746,6 +762,7 @@ BTF_SET8_START(test_sk_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
@@ -767,6 +784,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_SET8_END(test_sk_check_kfunc_ids)
static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
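
BTF_ID_FLAGS() entries such as the two added above only take effect once the surrounding set is registered for a program type. An abbreviated registration sketch — test_run.c binds this same set to several program types in its init path:

/* Registration sketch: the set only becomes callable once bound to a
 * program type; abbreviated from the test_run.c init path.
 */
static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &test_sk_check_kfunc_ids,
};

static int __init bpf_test_kfuncs_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS,
					 &bpf_prog_test_kfunc_set);
}
late_initcall(bpf_test_kfuncs_init);
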
diff --git a/net/core/Makefile b/net/core/Makefile
index 10edd66a8a37..8f367813bc68 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -12,7 +12,8 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
obj-y += dev.o dev_addr_lists.o dst.o netevent.o \
neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \
- fib_notifier.o xdp.o flow_offload.o gro.o
+ fib_notifier.o xdp.o flow_offload.o gro.o \
+ netdev-genl.o netdev-genl-gen.o
obj-$(CONFIG_NETDEV_ADDR_LIST_TEST) += dev_addr_lists_test.o
diff --git a/net/core/dev.c b/net/core/dev.c
index bb42150a38ec..7307a0c15c9f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1614,6 +1614,7 @@ const char *netdev_cmd_to_name(enum netdev_cmd cmd)
N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
+ N(XDP_FEAT_CHANGE)
}
#undef N
return "UNKNOWN_NETDEV_EVENT";
diff --git a/net/core/filter.c b/net/core/filter.c
index d8f9b53f3db6..2ce06a72a5ba 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4318,16 +4318,13 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
enum bpf_map_type map_type = ri->map_type;
- /* XDP_REDIRECT is not fully supported yet for xdp frags since
- * not all XDP capable drivers can map non-linear xdp_frame in
- * ndo_xdp_xmit.
- */
- if (unlikely(xdp_buff_has_frags(xdp) &&
- map_type != BPF_MAP_TYPE_CPUMAP))
- return -EOPNOTSUPP;
+ if (map_type == BPF_MAP_TYPE_XSKMAP) {
+ /* XDP_REDIRECT is not yet supported for AF_XDP. */
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ return -EOPNOTSUPP;
- if (map_type == BPF_MAP_TYPE_XSKMAP)
return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);
+ }
return __xdp_do_redirect_frame(ri, dev, xdp_convert_buff_to_frame(xdp),
xdp_prog);
@@ -7536,7 +7533,7 @@ static const struct bpf_func_proto bpf_tcp_raw_gen_syncookie_ipv4_proto = {
.arg1_type = ARG_PTR_TO_FIXED_SIZE_MEM,
.arg1_size = sizeof(struct iphdr),
.arg2_type = ARG_PTR_TO_MEM,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_3(bpf_tcp_raw_gen_syncookie_ipv6, struct ipv6hdr *, iph,
@@ -7568,7 +7565,7 @@ static const struct bpf_func_proto bpf_tcp_raw_gen_syncookie_ipv6_proto = {
.arg1_type = ARG_PTR_TO_FIXED_SIZE_MEM,
.arg1_size = sizeof(struct ipv6hdr),
.arg2_type = ARG_PTR_TO_MEM,
- .arg3_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_2(bpf_tcp_raw_check_syncookie_ipv4, struct iphdr *, iph,
diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c
new file mode 100644
index 000000000000..48812ec843f5
--- /dev/null
+++ b/net/core/netdev-genl-gen.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/netdev.yaml */
+/* YNL-GEN kernel source */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "netdev-genl-gen.h"
+
+#include <linux/netdev.h>
+
+/* NETDEV_CMD_DEV_GET - do */
+static const struct nla_policy netdev_dev_get_nl_policy[NETDEV_A_DEV_IFINDEX + 1] = {
+ [NETDEV_A_DEV_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
+};
+
+/* Ops table for netdev */
+static const struct genl_split_ops netdev_nl_ops[2] = {
+ {
+ .cmd = NETDEV_CMD_DEV_GET,
+ .doit = netdev_nl_dev_get_doit,
+ .policy = netdev_dev_get_nl_policy,
+ .maxattr = NETDEV_A_DEV_IFINDEX,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NETDEV_CMD_DEV_GET,
+ .dumpit = netdev_nl_dev_get_dumpit,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+};
+
+static const struct genl_multicast_group netdev_nl_mcgrps[] = {
+ [NETDEV_NLGRP_MGMT] = { "mgmt", },
+};
+
+struct genl_family netdev_nl_family __ro_after_init = {
+ .name = NETDEV_FAMILY_NAME,
+ .version = NETDEV_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = netdev_nl_ops,
+ .n_split_ops = ARRAY_SIZE(netdev_nl_ops),
+ .mcgrps = netdev_nl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(netdev_nl_mcgrps),
+};
diff --git a/net/core/netdev-genl-gen.h b/net/core/netdev-genl-gen.h
new file mode 100644
index 000000000000..b16dc7e026bb
--- /dev/null
+++ b/net/core/netdev-genl-gen.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/netdev.yaml */
+/* YNL-GEN kernel header */
+
+#ifndef _LINUX_NETDEV_GEN_H
+#define _LINUX_NETDEV_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <linux/netdev.h>
+
+int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info);
+int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+
+enum {
+ NETDEV_NLGRP_MGMT,
+};
+
+extern struct genl_family netdev_nl_family;
+
+#endif /* _LINUX_NETDEV_GEN_H */
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
new file mode 100644
index 000000000000..a4270fafdf11
--- /dev/null
+++ b/net/core/netdev-genl.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <linux/rtnetlink.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+
+#include "netdev-genl-gen.h"
+
+static int
+netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
+ u32 portid, u32 seq, int flags, u32 cmd)
+{
+ void *hdr;
+
+ hdr = genlmsg_put(rsp, portid, seq, &netdev_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
+ nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
+ netdev->xdp_features, NETDEV_A_DEV_PAD)) {
+ genlmsg_cancel(rsp, hdr);
+ return -EINVAL;
+ }
+
+ genlmsg_end(rsp, hdr);
+
+ return 0;
+}
+
+static void
+netdev_genl_dev_notify(struct net_device *netdev, int cmd)
+{
+ struct sk_buff *ntf;
+
+ if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
+ NETDEV_NLGRP_MGMT))
+ return;
+
+ ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!ntf)
+ return;
+
+ if (netdev_nl_dev_fill(netdev, ntf, 0, 0, 0, cmd)) {
+ nlmsg_free(ntf);
+ return;
+ }
+
+ genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
+ 0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
+}
+
+int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net_device *netdev;
+ struct sk_buff *rsp;
+ u32 ifindex;
+ int err;
+
+ if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
+ return -EINVAL;
+
+ ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
+
+ rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!rsp)
+ return -ENOMEM;
+
+ rtnl_lock();
+
+ netdev = __dev_get_by_index(genl_info_net(info), ifindex);
+ if (netdev)
+ err = netdev_nl_dev_fill(netdev, rsp, info->snd_portid,
+ info->snd_seq, 0, info->genlhdr->cmd);
+ else
+ err = -ENODEV;
+
+ rtnl_unlock();
+
+ if (err)
+ goto err_free_msg;
+
+ return genlmsg_reply(rsp, info);
+
+err_free_msg:
+ nlmsg_free(rsp);
+ return err;
+}
+
+int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct net_device *netdev;
+ int idx = 0, s_idx;
+ int h, s_h;
+ int err;
+
+ s_h = cb->args[0];
+ s_idx = cb->args[1];
+
+ rtnl_lock();
+
+ for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+ struct hlist_head *head;
+
+ idx = 0;
+ head = &net->dev_index_head[h];
+ hlist_for_each_entry(netdev, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
+ err = netdev_nl_dev_fill(netdev, skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, 0,
+ NETDEV_CMD_DEV_GET);
+ if (err < 0)
+ break;
+cont:
+ idx++;
+ }
+ }
+
+ rtnl_unlock();
+
+ if (err != -EMSGSIZE)
+ return err;
+
+ cb->args[1] = idx;
+ cb->args[0] = h;
+ cb->seq = net->dev_base_seq;
+
+ return skb->len;
+}
+
+static int netdev_genl_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
+ break;
+ case NETDEV_UNREGISTER:
+ netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
+ break;
+ case NETDEV_XDP_FEAT_CHANGE:
+ netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block netdev_genl_nb = {
+ .notifier_call = netdev_genl_netdevice_event,
+};
+
+static int __init netdev_genl_init(void)
+{
+ int err;
+
+ err = register_netdevice_notifier(&netdev_genl_nb);
+ if (err)
+ return err;
+
+ err = genl_register_family(&netdev_nl_family);
+ if (err)
+ goto err_unreg_ntf;
+
+ return 0;
+
+err_unreg_ntf:
+ unregister_netdevice_notifier(&netdev_genl_nb);
+ return err;
+}
+
+subsys_initcall(netdev_genl_init);
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 03938fe6d33a..8c92fc553317 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -4,6 +4,7 @@
* Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
*/
#include <linux/bpf.h>
+#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/types.h>
@@ -721,7 +722,7 @@ __diag_ignore_all("-Wmissing-prototypes",
*
* Returns 0 on success or ``-errno`` on error.
*/
-int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
+__bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
return -EOPNOTSUPP;
}
@@ -733,7 +734,7 @@ int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
*
* Returns 0 on success or ``-errno`` on error.
*/
-int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash)
+__bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash)
{
return -EOPNOTSUPP;
}
@@ -772,3 +773,21 @@ static int __init xdp_metadata_init(void)
return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &xdp_metadata_kfunc_set);
}
late_initcall(xdp_metadata_init);
+
+void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
+{
+ dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
+ if (support_sg)
+ dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT_SG;
+
+ call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
+}
+EXPORT_SYMBOL_GPL(xdp_features_set_redirect_target);
+
+void xdp_features_clear_redirect_target(struct net_device *dev)
+{
+ dev->xdp_features &= ~(NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG);
+ call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
+}
+EXPORT_SYMBOL_GPL(xdp_features_clear_redirect_target);
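
For context, the intended driver-side usage of the two helpers above looks roughly like the sketch below. Everything except the NETDEV_XDP_ACT_* flags and the two exported helpers is hypothetical driver code, not part of this series:

  /* Hypothetical driver (names are illustrative): advertise the base XDP
   * feature set at probe time, then toggle the redirect-target bits as
   * the ndo_xdp_xmit path comes and goes.
   */
  #include <linux/netdevice.h>
  #include <net/xdp.h>

  static void foo_advertise_xdp(struct net_device *netdev)
  {
          netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
                                 NETDEV_XDP_ACT_REDIRECT |
                                 NETDEV_XDP_ACT_XSK_ZEROCOPY;

          /* ndo_xdp_xmit works, including non-linear (SG) frames */
          xdp_features_set_redirect_target(netdev, true);
  }

  static void foo_disable_xdp_tx(struct net_device *netdev)
  {
          /* e.g. while a queue reconfiguration breaks ndo_xdp_xmit */
          xdp_features_clear_redirect_target(netdev);
  }

Each helper fires NETDEV_XDP_FEAT_CHANGE, so listeners on the new netdev generic netlink family see the change notification added earlier in this series.
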
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index d2c470524e58..146792cd26fe 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -295,7 +295,7 @@ static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
}
/* override sysctl_tcp_min_tso_segs */
-static u32 bbr_min_tso_segs(struct sock *sk)
+__bpf_kfunc static u32 bbr_min_tso_segs(struct sock *sk)
{
return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
}
@@ -328,7 +328,7 @@ static void bbr_save_cwnd(struct sock *sk)
bbr->prior_cwnd = max(bbr->prior_cwnd, tcp_snd_cwnd(tp));
}
-static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
+__bpf_kfunc static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
@@ -1023,7 +1023,7 @@ static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
bbr_update_gains(sk);
}
-static void bbr_main(struct sock *sk, const struct rate_sample *rs)
+__bpf_kfunc static void bbr_main(struct sock *sk, const struct rate_sample *rs)
{
struct bbr *bbr = inet_csk_ca(sk);
u32 bw;
@@ -1035,7 +1035,7 @@ static void bbr_main(struct sock *sk, const struct rate_sample *rs)
bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
}
-static void bbr_init(struct sock *sk)
+__bpf_kfunc static void bbr_init(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
@@ -1077,7 +1077,7 @@ static void bbr_init(struct sock *sk)
cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
}
-static u32 bbr_sndbuf_expand(struct sock *sk)
+__bpf_kfunc static u32 bbr_sndbuf_expand(struct sock *sk)
{
/* Provision 3 * cwnd since BBR may slow-start even during recovery. */
return 3;
@@ -1086,7 +1086,7 @@ static u32 bbr_sndbuf_expand(struct sock *sk)
/* In theory BBR does not need to undo the cwnd since it does not
* always reduce cwnd on losses (see bbr_main()). Keep it for now.
*/
-static u32 bbr_undo_cwnd(struct sock *sk)
+__bpf_kfunc static u32 bbr_undo_cwnd(struct sock *sk)
{
struct bbr *bbr = inet_csk_ca(sk);
@@ -1097,7 +1097,7 @@ static u32 bbr_undo_cwnd(struct sock *sk)
}
/* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
-static u32 bbr_ssthresh(struct sock *sk)
+__bpf_kfunc static u32 bbr_ssthresh(struct sock *sk)
{
bbr_save_cwnd(sk);
return tcp_sk(sk)->snd_ssthresh;
@@ -1125,7 +1125,7 @@ static size_t bbr_get_info(struct sock *sk, u32 ext, int *attr,
return 0;
}
-static void bbr_set_state(struct sock *sk, u8 new_state)
+__bpf_kfunc static void bbr_set_state(struct sock *sk, u8 new_state)
{
struct bbr *bbr = inet_csk_ca(sk);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index d3cae40749e8..db8b4b488c31 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -403,7 +403,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
* ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
* returns the leftover acks to adjust cwnd in congestion avoidance mode.
*/
-u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
+__bpf_kfunc u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
u32 cwnd = min(tcp_snd_cwnd(tp) + acked, tp->snd_ssthresh);
@@ -417,7 +417,7 @@ EXPORT_SYMBOL_GPL(tcp_slow_start);
/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
* for every packet that was ACKed.
*/
-void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
+__bpf_kfunc void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
/* If credits accumulated at a higher w, apply them gently now. */
if (tp->snd_cwnd_cnt >= w) {
@@ -443,7 +443,7 @@ EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
/* This is Jacobson's slow start and congestion avoidance.
* SIGCOMM '88, p. 328.
*/
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
+__bpf_kfunc void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -462,7 +462,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
/* Slow start threshold is half the congestion window (min 2) */
-u32 tcp_reno_ssthresh(struct sock *sk)
+__bpf_kfunc u32 tcp_reno_ssthresh(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
@@ -470,7 +470,7 @@ u32 tcp_reno_ssthresh(struct sock *sk)
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
-u32 tcp_reno_undo_cwnd(struct sock *sk)
+__bpf_kfunc u32 tcp_reno_undo_cwnd(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 768c10c1f649..0fd78ecb67e7 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -126,7 +126,7 @@ static inline void bictcp_hystart_reset(struct sock *sk)
ca->sample_cnt = 0;
}
-static void cubictcp_init(struct sock *sk)
+__bpf_kfunc static void cubictcp_init(struct sock *sk)
{
struct bictcp *ca = inet_csk_ca(sk);
@@ -139,7 +139,7 @@ static void cubictcp_init(struct sock *sk)
tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}
-static void cubictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
+__bpf_kfunc static void cubictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
if (event == CA_EVENT_TX_START) {
struct bictcp *ca = inet_csk_ca(sk);
@@ -321,7 +321,7 @@ tcp_friendliness:
ca->cnt = max(ca->cnt, 2U);
}
-static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
+__bpf_kfunc static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
@@ -338,7 +338,7 @@ static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
tcp_cong_avoid_ai(tp, ca->cnt, acked);
}
-static u32 cubictcp_recalc_ssthresh(struct sock *sk)
+__bpf_kfunc static u32 cubictcp_recalc_ssthresh(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
@@ -355,7 +355,7 @@ static u32 cubictcp_recalc_ssthresh(struct sock *sk)
return max((tcp_snd_cwnd(tp) * beta) / BICTCP_BETA_SCALE, 2U);
}
-static void cubictcp_state(struct sock *sk, u8 new_state)
+__bpf_kfunc static void cubictcp_state(struct sock *sk, u8 new_state)
{
if (new_state == TCP_CA_Loss) {
bictcp_reset(inet_csk_ca(sk));
@@ -445,7 +445,7 @@ static void hystart_update(struct sock *sk, u32 delay)
}
}
-static void cubictcp_acked(struct sock *sk, const struct ack_sample *sample)
+__bpf_kfunc static void cubictcp_acked(struct sock *sk, const struct ack_sample *sample)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index e0a2ca7456ff..bb23bb5b387a 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -75,7 +75,7 @@ static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
ca->old_delivered_ce = tp->delivered_ce;
}
-static void dctcp_init(struct sock *sk)
+__bpf_kfunc static void dctcp_init(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
@@ -104,7 +104,7 @@ static void dctcp_init(struct sock *sk)
INET_ECN_dontxmit(sk);
}
-static u32 dctcp_ssthresh(struct sock *sk)
+__bpf_kfunc static u32 dctcp_ssthresh(struct sock *sk)
{
struct dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -113,7 +113,7 @@ static u32 dctcp_ssthresh(struct sock *sk)
return max(tcp_snd_cwnd(tp) - ((tcp_snd_cwnd(tp) * ca->dctcp_alpha) >> 11U), 2U);
}
-static void dctcp_update_alpha(struct sock *sk, u32 flags)
+__bpf_kfunc static void dctcp_update_alpha(struct sock *sk, u32 flags)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct dctcp *ca = inet_csk_ca(sk);
@@ -169,7 +169,7 @@ static void dctcp_react_to_loss(struct sock *sk)
tp->snd_ssthresh = max(tcp_snd_cwnd(tp) >> 1U, 2U);
}
-static void dctcp_state(struct sock *sk, u8 new_state)
+__bpf_kfunc static void dctcp_state(struct sock *sk, u8 new_state)
{
if (new_state == TCP_CA_Recovery &&
new_state != inet_csk(sk)->icsk_ca_state)
@@ -179,7 +179,7 @@ static void dctcp_state(struct sock *sk, u8 new_state)
*/
}
-static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
+__bpf_kfunc static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
{
struct dctcp *ca = inet_csk_ca(sk);
@@ -229,7 +229,7 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
return 0;
}
-static u32 dctcp_cwnd_undo(struct sock *sk)
+__bpf_kfunc static u32 dctcp_cwnd_undo(struct sock *sk)
{
const struct dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
index 24002bc61e07..34913521c385 100644
--- a/net/netfilter/nf_conntrack_bpf.c
+++ b/net/netfilter/nf_conntrack_bpf.c
@@ -249,7 +249,7 @@ __diag_ignore_all("-Wmissing-prototypes",
* @opts__sz - Length of the bpf_ct_opts structure
* Must be NF_BPF_CT_OPTS_SZ (12)
*/
-struct nf_conn___init *
+__bpf_kfunc struct nf_conn___init *
bpf_xdp_ct_alloc(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz)
{
@@ -283,7 +283,7 @@ bpf_xdp_ct_alloc(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
* @opts__sz - Length of the bpf_ct_opts structure
* Must be NF_BPF_CT_OPTS_SZ (12)
*/
-struct nf_conn *
+__bpf_kfunc struct nf_conn *
bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz)
{
@@ -316,7 +316,7 @@ bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
* @opts__sz - Length of the bpf_ct_opts structure
* Must be NF_BPF_CT_OPTS_SZ (12)
*/
-struct nf_conn___init *
+__bpf_kfunc struct nf_conn___init *
bpf_skb_ct_alloc(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz)
{
@@ -351,7 +351,7 @@ bpf_skb_ct_alloc(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
* @opts__sz - Length of the bpf_ct_opts structure
* Must be NF_BPF_CT_OPTS_SZ (12)
*/
-struct nf_conn *
+__bpf_kfunc struct nf_conn *
bpf_skb_ct_lookup(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz)
{
@@ -376,7 +376,7 @@ bpf_skb_ct_lookup(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
* @nfct - Pointer to referenced nf_conn___init object, obtained
* using bpf_xdp_ct_alloc or bpf_skb_ct_alloc.
*/
-struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
+__bpf_kfunc struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
{
struct nf_conn *nfct = (struct nf_conn *)nfct_i;
int err;
@@ -400,7 +400,7 @@ struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
* @nf_conn - Pointer to referenced nf_conn object, obtained using
* bpf_xdp_ct_lookup or bpf_skb_ct_lookup.
*/
-void bpf_ct_release(struct nf_conn *nfct)
+__bpf_kfunc void bpf_ct_release(struct nf_conn *nfct)
{
if (!nfct)
return;
@@ -417,7 +417,7 @@ void bpf_ct_release(struct nf_conn *nfct)
* bpf_xdp_ct_alloc or bpf_skb_ct_alloc.
* @timeout - Timeout in msecs.
*/
-void bpf_ct_set_timeout(struct nf_conn___init *nfct, u32 timeout)
+__bpf_kfunc void bpf_ct_set_timeout(struct nf_conn___init *nfct, u32 timeout)
{
__nf_ct_set_timeout((struct nf_conn *)nfct, msecs_to_jiffies(timeout));
}
@@ -432,7 +432,7 @@ void bpf_ct_set_timeout(struct nf_conn___init *nfct, u32 timeout)
* bpf_ct_insert_entry, bpf_xdp_ct_lookup, or bpf_skb_ct_lookup.
* @timeout - New timeout in msecs.
*/
-int bpf_ct_change_timeout(struct nf_conn *nfct, u32 timeout)
+__bpf_kfunc int bpf_ct_change_timeout(struct nf_conn *nfct, u32 timeout)
{
return __nf_ct_change_timeout(nfct, msecs_to_jiffies(timeout));
}
@@ -447,7 +447,7 @@ int bpf_ct_change_timeout(struct nf_conn *nfct, u32 timeout)
* bpf_xdp_ct_alloc or bpf_skb_ct_alloc.
* @status - New status value.
*/
-int bpf_ct_set_status(const struct nf_conn___init *nfct, u32 status)
+__bpf_kfunc int bpf_ct_set_status(const struct nf_conn___init *nfct, u32 status)
{
return nf_ct_change_status_common((struct nf_conn *)nfct, status);
}
@@ -462,7 +462,7 @@ int bpf_ct_set_status(const struct nf_conn___init *nfct, u32 status)
* bpf_ct_insert_entry, bpf_xdp_ct_lookup or bpf_skb_ct_lookup.
* @status - New status value.
*/
-int bpf_ct_change_status(struct nf_conn *nfct, u32 status)
+__bpf_kfunc int bpf_ct_change_status(struct nf_conn *nfct, u32 status)
{
return nf_ct_change_status_common(nfct, status);
}
diff --git a/net/netfilter/nf_nat_bpf.c b/net/netfilter/nf_nat_bpf.c
index 0fa5a0bbb0ff..141ee7783223 100644
--- a/net/netfilter/nf_nat_bpf.c
+++ b/net/netfilter/nf_nat_bpf.c
@@ -30,9 +30,9 @@ __diag_ignore_all("-Wmissing-prototypes",
* interpreted as select a random port.
* @manip - NF_NAT_MANIP_SRC or NF_NAT_MANIP_DST
*/
-int bpf_ct_set_nat_info(struct nf_conn___init *nfct,
- union nf_inet_addr *addr, int port,
- enum nf_nat_manip_type manip)
+__bpf_kfunc int bpf_ct_set_nat_info(struct nf_conn___init *nfct,
+ union nf_inet_addr *addr, int port,
+ enum nf_nat_manip_type manip)
{
struct nf_conn *ct = (struct nf_conn *)nfct;
u16 proto = nf_ct_l3num(ct);
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index ed6c71826d31..b2df1e0f8153 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -140,6 +140,10 @@ static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
}
}
+#define NETDEV_XDP_ACT_ZC (NETDEV_XDP_ACT_BASIC | \
+ NETDEV_XDP_ACT_REDIRECT | \
+ NETDEV_XDP_ACT_XSK_ZEROCOPY)
+
int xp_assign_dev(struct xsk_buff_pool *pool,
struct net_device *netdev, u16 queue_id, u16 flags)
{
@@ -178,8 +182,7 @@ int xp_assign_dev(struct xsk_buff_pool *pool,
/* For copy-mode, we are done. */
return 0;
- if (!netdev->netdev_ops->ndo_bpf ||
- !netdev->netdev_ops->ndo_xsk_wakeup) {
+ if ((netdev->xdp_features & NETDEV_XDP_ACT_ZC) != NETDEV_XDP_ACT_ZC) {
err = -EOPNOTSUPP;
goto err_unreg_pool;
}
diff --git a/net/xfrm/xfrm_interface_bpf.c b/net/xfrm/xfrm_interface_bpf.c
index 1ef2162cebcf..d74f3fd20f2b 100644
--- a/net/xfrm/xfrm_interface_bpf.c
+++ b/net/xfrm/xfrm_interface_bpf.c
@@ -39,8 +39,7 @@ __diag_ignore_all("-Wmissing-prototypes",
* @to - Pointer to memory to which the metadata will be copied
* Cannot be NULL
*/
-__used noinline
-int bpf_skb_get_xfrm_info(struct __sk_buff *skb_ctx, struct bpf_xfrm_info *to)
+__bpf_kfunc int bpf_skb_get_xfrm_info(struct __sk_buff *skb_ctx, struct bpf_xfrm_info *to)
{
struct sk_buff *skb = (struct sk_buff *)skb_ctx;
struct xfrm_md_info *info;
@@ -62,9 +61,7 @@ int bpf_skb_get_xfrm_info(struct __sk_buff *skb_ctx, struct bpf_xfrm_info *to)
* @from - Pointer to memory from which the metadata will be copied
* Cannot be NULL
*/
-__used noinline
-int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx,
- const struct bpf_xfrm_info *from)
+__bpf_kfunc int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx, const struct bpf_xfrm_info *from)
{
struct sk_buff *skb = (struct sk_buff *)skb_ctx;
struct metadata_dst *md_dst;
diff --git a/samples/bpf/syscall_tp_kern.c b/samples/bpf/syscall_tp_kern.c
index 50231c2eff9c..e7121dd1ee37 100644
--- a/samples/bpf/syscall_tp_kern.c
+++ b/samples/bpf/syscall_tp_kern.c
@@ -58,6 +58,13 @@ int trace_enter_open_at(struct syscalls_enter_open_args *ctx)
return 0;
}
+SEC("tracepoint/syscalls/sys_enter_openat2")
+int trace_enter_open_at2(struct syscalls_enter_open_args *ctx)
+{
+ count(&enter_open_map);
+ return 0;
+}
+
SEC("tracepoint/syscalls/sys_exit_open")
int trace_enter_exit(struct syscalls_exit_open_args *ctx)
{
@@ -71,3 +78,10 @@ int trace_enter_exit_at(struct syscalls_exit_open_args *ctx)
count(&exit_open_map);
return 0;
}
+
+SEC("tracepoint/syscalls/sys_exit_openat2")
+int trace_enter_exit_at2(struct syscalls_exit_open_args *ctx)
+{
+ count(&exit_open_map);
+ return 0;
+}
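
The two new programs attach to the openat2 tracepoints; since openat2(2) has no glibc wrapper, a user-space trigger for them (not part of this sample; the helper name below is illustrative) would go through syscall(2), roughly:

  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/openat2.h>

  /* Hypothetical trigger for sys_enter_openat2/sys_exit_openat2. */
  static void trigger_openat2(void)
  {
          struct open_how how = { .flags = O_RDONLY };
          int fd = syscall(SYS_openat2, AT_FDCWD, "/dev/null",
                           &how, sizeof(how));

          if (fd >= 0)
                  close(fd);
  }
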
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index cfc9fdc1e863..e87738dbffc1 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -2233,10 +2233,38 @@ static void profile_close_perf_events(struct profiler_bpf *obj)
profile_perf_event_cnt = 0;
}
+static int profile_open_perf_event(int mid, int cpu, int map_fd)
+{
+ int pmu_fd;
+
+ pmu_fd = syscall(__NR_perf_event_open, &metrics[mid].attr,
+ -1 /*pid*/, cpu, -1 /*group_fd*/, 0);
+ if (pmu_fd < 0) {
+ if (errno == ENODEV) {
+ p_info("cpu %d may be offline, skip %s profiling.",
+ cpu, metrics[mid].name);
+ profile_perf_event_cnt++;
+ return 0;
+ }
+ return -1;
+ }
+
+ if (bpf_map_update_elem(map_fd,
+ &profile_perf_event_cnt,
+ &pmu_fd, BPF_ANY) ||
+ ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
+ close(pmu_fd);
+ return -1;
+ }
+
+ profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
+ return 0;
+}
+
static int profile_open_perf_events(struct profiler_bpf *obj)
{
unsigned int cpu, m;
- int map_fd, pmu_fd;
+ int map_fd;
profile_perf_events = calloc(
sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric);
@@ -2255,17 +2283,11 @@ static int profile_open_perf_events(struct profiler_bpf *obj)
if (!metrics[m].selected)
continue;
for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
- pmu_fd = syscall(__NR_perf_event_open, &metrics[m].attr,
- -1/*pid*/, cpu, -1/*group_fd*/, 0);
- if (pmu_fd < 0 ||
- bpf_map_update_elem(map_fd, &profile_perf_event_cnt,
- &pmu_fd, BPF_ANY) ||
- ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
+ if (profile_open_perf_event(m, cpu, map_fd)) {
p_err("failed to create event %s on cpu %d",
metrics[m].name, cpu);
return -1;
}
- profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
}
}
return 0;
diff --git a/tools/bpf/resolve_btfids/Build b/tools/bpf/resolve_btfids/Build
index ae82da03f9bf..077de3829c72 100644
--- a/tools/bpf/resolve_btfids/Build
+++ b/tools/bpf/resolve_btfids/Build
@@ -1,3 +1,5 @@
+hostprogs := resolve_btfids
+
resolve_btfids-y += main.o
resolve_btfids-y += rbtree.o
resolve_btfids-y += zalloc.o
@@ -7,4 +9,4 @@ resolve_btfids-y += str_error_r.o
$(OUTPUT)%.o: ../../lib/%.c FORCE
$(call rule_mkdir)
- $(call if_changed_dep,cc_o_c)
+ $(call if_changed_dep,host_cc_o_c)
diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
index daed388aa5d7..ac548a7baa73 100644
--- a/tools/bpf/resolve_btfids/Makefile
+++ b/tools/bpf/resolve_btfids/Makefile
@@ -17,11 +17,14 @@ else
MAKEFLAGS=--no-print-directory
endif
-# always use the host compiler
+# Overrides for the prepare step libraries.
HOST_OVERRIDES := AR="$(HOSTAR)" CC="$(HOSTCC)" LD="$(HOSTLD)" ARCH="$(HOSTARCH)" \
- EXTRA_CFLAGS="$(HOSTCFLAGS) $(KBUILD_HOSTCFLAGS)"
+ CROSS_COMPILE="" EXTRA_CFLAGS="$(HOSTCFLAGS)"
RM ?= rm
+HOSTCC ?= gcc
+HOSTLD ?= ld
+HOSTAR ?= ar
CROSS_COMPILE =
OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/
@@ -64,7 +67,7 @@ $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OU
LIBELF_FLAGS := $(shell $(HOSTPKG_CONFIG) libelf --cflags 2>/dev/null)
LIBELF_LIBS := $(shell $(HOSTPKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf)
-CFLAGS += -g \
+HOSTCFLAGS += -g \
-I$(srctree)/tools/include \
-I$(srctree)/tools/include/uapi \
-I$(LIBBPF_INCLUDE) \
@@ -73,11 +76,11 @@ CFLAGS += -g \
LIBS = $(LIBELF_LIBS) -lz
-export srctree OUTPUT CFLAGS Q
+export srctree OUTPUT HOSTCFLAGS Q HOSTCC HOSTLD HOSTAR
include $(srctree)/tools/build/Makefile.include
$(BINARY_IN): fixdep FORCE prepare | $(OUTPUT)
- $(Q)$(MAKE) $(build)=resolve_btfids $(HOST_OVERRIDES)
+ $(Q)$(MAKE) $(build)=resolve_btfids
$(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN)
$(call msg,LINK,$@)
diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile
index 8b3d87b82b7a..47acf6936516 100644
--- a/tools/bpf/runqslower/Makefile
+++ b/tools/bpf/runqslower/Makefile
@@ -13,6 +13,8 @@ BPF_DESTDIR := $(BPFOBJ_OUTPUT)
BPF_INCLUDE := $(BPF_DESTDIR)/include
INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../include/uapi)
CFLAGS := -g -Wall $(CLANG_CROSS_FLAGS)
+CFLAGS += $(EXTRA_CFLAGS)
+LDFLAGS += $(EXTRA_LDFLAGS)
# Try to detect best kernel BTF source
KERNEL_REL := $(shell uname -r)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 7f024ac22edd..17afd2b35ee5 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -2801,7 +2801,7 @@ union bpf_attr {
*
* long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
* Description
- * For en eBPF program attached to a perf event, retrieve the
+ * For an eBPF program attached to a perf event, retrieve the
* value of the event counter associated to *ctx* and store it in
* the structure pointed by *buf* and of size *buf_size*. Enabled
* and running times are also stored in the structure (see
@@ -5817,8 +5817,8 @@ enum {
BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4),
BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5),
BPF_F_ADJ_ROOM_ENCAP_L2_ETH = (1ULL << 6),
- BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = (1ULL << 7),
- BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = (1ULL << 8),
+ BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = (1ULL << 7),
+ BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = (1ULL << 8),
};
enum {
diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h
new file mode 100644
index 000000000000..9ee459872600
--- /dev/null
+++ b/tools/include/uapi/linux/netdev.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/netdev.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_NETDEV_H
+#define _UAPI_LINUX_NETDEV_H
+
+#define NETDEV_FAMILY_NAME "netdev"
+#define NETDEV_FAMILY_VERSION 1
+
+/**
+ * enum netdev_xdp_act
+ * @NETDEV_XDP_ACT_BASIC: XDP features set supported by all drivers
+ * (XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX)
+ * @NETDEV_XDP_ACT_REDIRECT: The netdev supports XDP_REDIRECT
+ * @NETDEV_XDP_ACT_NDO_XMIT: This feature informs whether netdev implements
+ * the ndo_xdp_xmit callback.
+ * @NETDEV_XDP_ACT_XSK_ZEROCOPY: This feature informs whether netdev supports
+ * AF_XDP in zero copy mode.
+ * @NETDEV_XDP_ACT_HW_OFFLOAD: This feature informs whether netdev supports
+ * XDP hw offloading.
+ * @NETDEV_XDP_ACT_RX_SG: This feature informs whether netdev implements
+ * non-linear XDP buffer support in the driver napi callback.
+ * @NETDEV_XDP_ACT_NDO_XMIT_SG: This feature informs whether netdev implements
+ * non-linear XDP buffer support in the ndo_xdp_xmit callback.
+ */
+enum netdev_xdp_act {
+ NETDEV_XDP_ACT_BASIC = 1,
+ NETDEV_XDP_ACT_REDIRECT = 2,
+ NETDEV_XDP_ACT_NDO_XMIT = 4,
+ NETDEV_XDP_ACT_XSK_ZEROCOPY = 8,
+ NETDEV_XDP_ACT_HW_OFFLOAD = 16,
+ NETDEV_XDP_ACT_RX_SG = 32,
+ NETDEV_XDP_ACT_NDO_XMIT_SG = 64,
+};
+
+enum {
+ NETDEV_A_DEV_IFINDEX = 1,
+ NETDEV_A_DEV_PAD,
+ NETDEV_A_DEV_XDP_FEATURES,
+
+ __NETDEV_A_DEV_MAX,
+ NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1)
+};
+
+enum {
+ NETDEV_CMD_DEV_GET = 1,
+ NETDEV_CMD_DEV_ADD_NTF,
+ NETDEV_CMD_DEV_DEL_NTF,
+ NETDEV_CMD_DEV_CHANGE_NTF,
+
+ __NETDEV_CMD_MAX,
+ NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
+};
+
+#define NETDEV_MCGRP_MGMT "mgmt"
+
+#endif /* _UAPI_LINUX_NETDEV_H */
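
These flags are what the libbpf changes further below expose through the new feature_flags field of bpf_xdp_query(). A minimal user-space consumer might look like the sketch below; it assumes a libbpf built with the feature_flags extension from this series, and the function name is illustrative:

  #include <errno.h>
  #include <stdio.h>
  #include <net/if.h>
  #include <bpf/libbpf.h>
  #include <linux/netdev.h>

  /* Sketch: print the XDP feature mask of an interface by name. */
  static int print_xdp_features(const char *ifname)
  {
          LIBBPF_OPTS(bpf_xdp_query_opts, opts);
          int ifindex = if_nametoindex(ifname);
          int err;

          if (!ifindex)
                  return -errno;

          err = bpf_xdp_query(ifindex, 0, &opts);
          if (err)
                  return err;

          printf("%s: xdp_features 0x%llx%s%s\n", ifname,
                 (unsigned long long)opts.feature_flags,
                 opts.feature_flags & NETDEV_XDP_ACT_REDIRECT ?
                         " redirect" : "",
                 opts.feature_flags & NETDEV_XDP_ACT_XSK_ZEROCOPY ?
                         " zc" : "");
          return 0;
  }
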
diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
index 496e6a8ee0dc..1ac57bb7ac55 100644
--- a/tools/lib/bpf/bpf_core_read.h
+++ b/tools/lib/bpf/bpf_core_read.h
@@ -364,7 +364,7 @@ enum bpf_enum_value_kind {
/* Non-CO-RE variant of BPF_CORE_READ_INTO() */
#define BPF_PROBE_READ_INTO(dst, src, a, ...) ({ \
- ___core_read(bpf_probe_read, bpf_probe_read, \
+ ___core_read(bpf_probe_read_kernel, bpf_probe_read_kernel, \
dst, (src), a, ##__VA_ARGS__) \
})
@@ -400,7 +400,7 @@ enum bpf_enum_value_kind {
/* Non-CO-RE variant of BPF_CORE_READ_STR_INTO() */
#define BPF_PROBE_READ_STR_INTO(dst, src, a, ...) ({ \
- ___core_read(bpf_probe_read_str, bpf_probe_read, \
+ ___core_read(bpf_probe_read_kernel_str, bpf_probe_read_kernel, \
dst, (src), a, ##__VA_ARGS__) \
})
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index d37c4fe2849d..5ec1871acb2f 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -109,7 +109,7 @@
* This is a variable-specific variant of more global barrier().
*/
#ifndef barrier_var
-#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var))
+#define barrier_var(var) asm volatile("" : "+r"(var))
#endif
/*
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index eed5cec6f510..35a698eb825d 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -34,7 +34,6 @@
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
-#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
@@ -870,42 +869,6 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
return 0;
}
-__u32 get_kernel_version(void)
-{
- /* On Ubuntu LINUX_VERSION_CODE doesn't correspond to info.release,
- * but Ubuntu provides /proc/version_signature file, as described at
- * https://ubuntu.com/kernel, with an example contents below, which we
- * can use to get a proper LINUX_VERSION_CODE.
- *
- * Ubuntu 5.4.0-12.15-generic 5.4.8
- *
- * In the above, 5.4.8 is what kernel is actually expecting, while
- * uname() call will return 5.4.0 in info.release.
- */
- const char *ubuntu_kver_file = "/proc/version_signature";
- __u32 major, minor, patch;
- struct utsname info;
-
- if (faccessat(AT_FDCWD, ubuntu_kver_file, R_OK, AT_EACCESS) == 0) {
- FILE *f;
-
- f = fopen(ubuntu_kver_file, "r");
- if (f) {
- if (fscanf(f, "%*s %*s %d.%d.%d\n", &major, &minor, &patch) == 3) {
- fclose(f);
- return KERNEL_VERSION(major, minor, patch);
- }
- fclose(f);
- }
- /* something went wrong, fall back to uname() approach */
- }
-
- uname(&info);
- if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
- return 0;
- return KERNEL_VERSION(major, minor, patch);
-}
-
static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
@@ -11710,17 +11673,22 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
const size_t attr_sz = sizeof(struct perf_event_attr);
struct perf_buffer_params p = {};
struct perf_event_attr attr;
+ __u32 sample_period;
if (!OPTS_VALID(opts, perf_buffer_opts))
return libbpf_err_ptr(-EINVAL);
+ sample_period = OPTS_GET(opts, sample_period, 1);
+ if (!sample_period)
+ sample_period = 1;
+
memset(&attr, 0, attr_sz);
attr.size = attr_sz;
attr.config = PERF_COUNT_SW_BPF_OUTPUT;
attr.type = PERF_TYPE_SOFTWARE;
attr.sample_type = PERF_SAMPLE_RAW;
- attr.sample_period = 1;
- attr.wakeup_events = 1;
+ attr.sample_period = sample_period;
+ attr.wakeup_events = sample_period;
p.attr = &attr;
p.sample_cb = sample_cb;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 8777ff21ea1d..2efd80f6f7b9 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -1048,9 +1048,10 @@ struct bpf_xdp_query_opts {
__u32 hw_prog_id; /* output */
__u32 skb_prog_id; /* output */
__u8 attach_mode; /* output */
+ __u64 feature_flags; /* output */
size_t :0;
};
-#define bpf_xdp_query_opts__last_field attach_mode
+#define bpf_xdp_query_opts__last_field feature_flags
LIBBPF_API int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags,
const struct bpf_xdp_attach_opts *opts);
@@ -1246,8 +1247,10 @@ typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);
/* common use perf buffer options */
struct perf_buffer_opts {
size_t sz;
+ __u32 sample_period;
+ size_t :0;
};
-#define perf_buffer_opts__last_field sz
+#define perf_buffer_opts__last_field sample_period
/**
* @brief **perf_buffer__new()** creates BPF perfbuf manager for a specified
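
The new sample_period option trades per-event wakeups for batching: the underlying perf event only wakes the epoll consumer once every sample_period samples. A latency-tolerant consumer might set it up as in this sketch (callback names and the page count are illustrative):

  #include <bpf/libbpf.h>

  /* Sketch: wake the consumer every 64 samples instead of per event.
   * 'map_fd' must refer to a BPF_MAP_TYPE_PERF_EVENT_ARRAY map.
   */
  static struct perf_buffer *open_batched_pb(int map_fd,
                                             perf_buffer_sample_fn on_sample,
                                             perf_buffer_lost_fn on_lost)
  {
          LIBBPF_OPTS(perf_buffer_opts, opts, .sample_period = 64);

          return perf_buffer__new(map_fd, 8 /* pages per CPU */,
                                  on_sample, on_lost, NULL, &opts);
  }
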
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index b44fcbb4b42e..4f3bc968ff8e 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -12,11 +12,94 @@
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
+#include <linux/version.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
+/* On Ubuntu, LINUX_VERSION_CODE doesn't correspond to info.release,
+ * but Ubuntu provides the /proc/version_signature file, as described at
+ * https://ubuntu.com/kernel, with example contents below, which we
+ * can use to get a proper LINUX_VERSION_CODE.
+ *
+ * Ubuntu 5.4.0-12.15-generic 5.4.8
+ *
+ * In the above, 5.4.8 is what the kernel is actually expecting, while
+ * the uname() call will return 5.4.0 in info.release.
+ */
+static __u32 get_ubuntu_kernel_version(void)
+{
+ const char *ubuntu_kver_file = "/proc/version_signature";
+ __u32 major, minor, patch;
+ int ret;
+ FILE *f;
+
+ if (faccessat(AT_FDCWD, ubuntu_kver_file, R_OK, AT_EACCESS) != 0)
+ return 0;
+
+ f = fopen(ubuntu_kver_file, "r");
+ if (!f)
+ return 0;
+
+ ret = fscanf(f, "%*s %*s %u.%u.%u\n", &major, &minor, &patch);
+ fclose(f);
+ if (ret != 3)
+ return 0;
+
+ return KERNEL_VERSION(major, minor, patch);
+}
+
+/* On Debian, LINUX_VERSION_CODE doesn't correspond to info.release.
+ * Instead, it is provided in info.version. Example content from
+ * Debian 10 is shown below.
+ *
+ * utsname::release 4.19.0-22-amd64
+ * utsname::version #1 SMP Debian 4.19.260-1 (2022-09-29)
+ *
+ * In the above, 4.19.260 is what the kernel is actually expecting, while
+ * the uname() call will return 4.19.0 in info.release.
+ */
+static __u32 get_debian_kernel_version(struct utsname *info)
+{
+ __u32 major, minor, patch;
+ char *p;
+
+ p = strstr(info->version, "Debian ");
+ if (!p) {
+ /* This is not a Debian kernel. */
+ return 0;
+ }
+
+ if (sscanf(p, "Debian %u.%u.%u", &major, &minor, &patch) != 3)
+ return 0;
+
+ return KERNEL_VERSION(major, minor, patch);
+}
+
+__u32 get_kernel_version(void)
+{
+ __u32 major, minor, patch, version;
+ struct utsname info;
+
+ /* Check if this is an Ubuntu kernel. */
+ version = get_ubuntu_kernel_version();
+ if (version != 0)
+ return version;
+
+ uname(&info);
+
+ /* Check if this is a Debian kernel. */
+ version = get_debian_kernel_version(&info);
+ if (version != 0)
+ return version;
+
+ if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
+ return 0;
+
+ return KERNEL_VERSION(major, minor, patch);
+}
+
static int probe_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, size_t insns_cnt,
char *log_buf, size_t log_buf_sz)
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index 35104580870c..cb082a04ffa8 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -9,6 +9,7 @@
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <linux/rtnetlink.h>
+#include <linux/netdev.h>
#include <sys/socket.h>
#include <errno.h>
#include <time.h>
@@ -39,9 +40,15 @@ struct xdp_id_md {
int ifindex;
__u32 flags;
struct xdp_link_info info;
+ __u64 feature_flags;
};
-static int libbpf_netlink_open(__u32 *nl_pid)
+struct xdp_features_md {
+ int ifindex;
+ __u64 flags;
+};
+
+static int libbpf_netlink_open(__u32 *nl_pid, int proto)
{
struct sockaddr_nl sa;
socklen_t addrlen;
@@ -51,7 +58,7 @@ static int libbpf_netlink_open(__u32 *nl_pid)
memset(&sa, 0, sizeof(sa));
sa.nl_family = AF_NETLINK;
- sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
+ sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, proto);
if (sock < 0)
return -errno;
@@ -212,14 +219,14 @@ done:
}
static int libbpf_netlink_send_recv(struct libbpf_nla_req *req,
- __dump_nlmsg_t parse_msg,
+ int proto, __dump_nlmsg_t parse_msg,
libbpf_dump_nlmsg_t parse_attr,
void *cookie)
{
__u32 nl_pid = 0;
int sock, ret;
- sock = libbpf_netlink_open(&nl_pid);
+ sock = libbpf_netlink_open(&nl_pid, proto);
if (sock < 0)
return sock;
@@ -238,6 +245,43 @@ out:
return ret;
}
+static int parse_genl_family_id(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn,
+ void *cookie)
+{
+ struct genlmsghdr *gnl = NLMSG_DATA(nh);
+ struct nlattr *na = (struct nlattr *)((void *)gnl + GENL_HDRLEN);
+ struct nlattr *tb[CTRL_ATTR_FAMILY_ID + 1];
+ __u16 *id = cookie;
+
+ libbpf_nla_parse(tb, CTRL_ATTR_FAMILY_ID, na,
+ NLMSG_PAYLOAD(nh, sizeof(*gnl)), NULL);
+ if (!tb[CTRL_ATTR_FAMILY_ID])
+ return NL_CONT;
+
+ *id = libbpf_nla_getattr_u16(tb[CTRL_ATTR_FAMILY_ID]);
+ return NL_DONE;
+}
+
+static int libbpf_netlink_resolve_genl_family_id(const char *name,
+ __u16 len, __u16 *id)
+{
+ struct libbpf_nla_req req = {
+ .nh.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN),
+ .nh.nlmsg_type = GENL_ID_CTRL,
+ .nh.nlmsg_flags = NLM_F_REQUEST,
+ .gnl.cmd = CTRL_CMD_GETFAMILY,
+ .gnl.version = 2,
+ };
+ int err;
+
+ err = nlattr_add(&req, CTRL_ATTR_FAMILY_NAME, name, len);
+ if (err < 0)
+ return err;
+
+ return libbpf_netlink_send_recv(&req, NETLINK_GENERIC,
+ parse_genl_family_id, NULL, id);
+}
+
static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd,
__u32 flags)
{
@@ -271,7 +315,7 @@ static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd,
}
nlattr_end_nested(&req, nla);
- return libbpf_netlink_send_recv(&req, NULL, NULL, NULL);
+ return libbpf_netlink_send_recv(&req, NETLINK_ROUTE, NULL, NULL, NULL);
}
int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags, const struct bpf_xdp_attach_opts *opts)
@@ -357,6 +401,29 @@ static int get_xdp_info(void *cookie, void *msg, struct nlattr **tb)
return 0;
}
+static int parse_xdp_features(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn,
+ void *cookie)
+{
+ struct genlmsghdr *gnl = NLMSG_DATA(nh);
+ struct nlattr *na = (struct nlattr *)((void *)gnl + GENL_HDRLEN);
+ struct nlattr *tb[NETDEV_A_DEV_MAX + 1];
+ struct xdp_features_md *md = cookie;
+ __u32 ifindex;
+
+ libbpf_nla_parse(tb, NETDEV_A_DEV_MAX, na,
+ NLMSG_PAYLOAD(nh, sizeof(*gnl)), NULL);
+
+ if (!tb[NETDEV_A_DEV_IFINDEX] || !tb[NETDEV_A_DEV_XDP_FEATURES])
+ return NL_CONT;
+
+ ifindex = libbpf_nla_getattr_u32(tb[NETDEV_A_DEV_IFINDEX]);
+ if (ifindex != md->ifindex)
+ return NL_CONT;
+
+ md->flags = libbpf_nla_getattr_u64(tb[NETDEV_A_DEV_XDP_FEATURES]);
+ return NL_DONE;
+}
+
int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
{
struct libbpf_nla_req req = {
@@ -366,6 +433,10 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
.ifinfo.ifi_family = AF_PACKET,
};
struct xdp_id_md xdp_id = {};
+ struct xdp_features_md md = {
+ .ifindex = ifindex,
+ };
+ __u16 id;
int err;
if (!OPTS_VALID(opts, bpf_xdp_query_opts))
@@ -382,7 +453,7 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
xdp_id.ifindex = ifindex;
xdp_id.flags = xdp_flags;
- err = libbpf_netlink_send_recv(&req, __dump_link_nlmsg,
+ err = libbpf_netlink_send_recv(&req, NETLINK_ROUTE, __dump_link_nlmsg,
get_xdp_info, &xdp_id);
if (err)
return libbpf_err(err);
@@ -393,6 +464,31 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
OPTS_SET(opts, skb_prog_id, xdp_id.info.skb_prog_id);
OPTS_SET(opts, attach_mode, xdp_id.info.attach_mode);
+ if (!OPTS_HAS(opts, feature_flags))
+ return 0;
+
+ err = libbpf_netlink_resolve_genl_family_id("netdev", sizeof("netdev"), &id);
+ if (err < 0)
+ return libbpf_err(err);
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
+ req.nh.nlmsg_flags = NLM_F_REQUEST;
+ req.nh.nlmsg_type = id;
+ req.gnl.cmd = NETDEV_CMD_DEV_GET;
+ req.gnl.version = 2;
+
+ err = nlattr_add(&req, NETDEV_A_DEV_IFINDEX, &ifindex, sizeof(ifindex));
+ if (err < 0)
+ return libbpf_err(err);
+
+ err = libbpf_netlink_send_recv(&req, NETLINK_GENERIC,
+ parse_xdp_features, NULL, &md);
+ if (err)
+ return libbpf_err(err);
+
+ opts->feature_flags = md.flags;
+
return 0;
}
@@ -493,7 +589,7 @@ static int tc_qdisc_modify(struct bpf_tc_hook *hook, int cmd, int flags)
if (ret < 0)
return ret;
- return libbpf_netlink_send_recv(&req, NULL, NULL, NULL);
+ return libbpf_netlink_send_recv(&req, NETLINK_ROUTE, NULL, NULL, NULL);
}
static int tc_qdisc_create_excl(struct bpf_tc_hook *hook)
@@ -673,7 +769,8 @@ int bpf_tc_attach(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts)
info.opts = opts;
- ret = libbpf_netlink_send_recv(&req, get_tc_info, NULL, &info);
+ ret = libbpf_netlink_send_recv(&req, NETLINK_ROUTE, get_tc_info, NULL,
+ &info);
if (ret < 0)
return libbpf_err(ret);
if (!info.processed)
@@ -739,7 +836,7 @@ static int __bpf_tc_detach(const struct bpf_tc_hook *hook,
return ret;
}
- return libbpf_netlink_send_recv(&req, NULL, NULL, NULL);
+ return libbpf_netlink_send_recv(&req, NETLINK_ROUTE, NULL, NULL, NULL);
}
int bpf_tc_detach(const struct bpf_tc_hook *hook,
@@ -804,7 +901,8 @@ int bpf_tc_query(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts)
info.opts = opts;
- ret = libbpf_netlink_send_recv(&req, get_tc_info, NULL, &info);
+ ret = libbpf_netlink_send_recv(&req, NETLINK_ROUTE, get_tc_info, NULL,
+ &info);
if (ret < 0)
return libbpf_err(ret);
if (!info.processed)
diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c
index 3900d052ed19..975e265eab3b 100644
--- a/tools/lib/bpf/nlattr.c
+++ b/tools/lib/bpf/nlattr.c
@@ -178,7 +178,7 @@ int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh)
hlen += nlmsg_len(&err->msg);
attr = (struct nlattr *) ((void *) err + hlen);
- alen = nlh->nlmsg_len - hlen;
+ alen = (void *)nlh + nlh->nlmsg_len - (void *)attr;
if (libbpf_nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen,
extack_policy) != 0) {
diff --git a/tools/lib/bpf/nlattr.h b/tools/lib/bpf/nlattr.h
index 4d15ae2ff812..d92d1c1de700 100644
--- a/tools/lib/bpf/nlattr.h
+++ b/tools/lib/bpf/nlattr.h
@@ -14,6 +14,7 @@
#include <errno.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
+#include <linux/genetlink.h>
/* avoid multiple definition of netlink features */
#define __LINUX_NETLINK_H
@@ -58,6 +59,7 @@ struct libbpf_nla_req {
union {
struct ifinfomsg ifinfo;
struct tcmsg tc;
+ struct genlmsghdr gnl;
};
char buf[128];
};
@@ -89,11 +91,21 @@ static inline uint8_t libbpf_nla_getattr_u8(const struct nlattr *nla)
return *(uint8_t *)libbpf_nla_data(nla);
}
+static inline uint16_t libbpf_nla_getattr_u16(const struct nlattr *nla)
+{
+ return *(uint16_t *)libbpf_nla_data(nla);
+}
+
static inline uint32_t libbpf_nla_getattr_u32(const struct nlattr *nla)
{
return *(uint32_t *)libbpf_nla_data(nla);
}
+static inline uint64_t libbpf_nla_getattr_u64(const struct nlattr *nla)
+{
+ return *(uint64_t *)libbpf_nla_data(nla);
+}
+
static inline const char *libbpf_nla_getattr_str(const struct nlattr *nla)
{
return (const char *)libbpf_nla_data(nla);
diff --git a/tools/lib/bpf/usdt.bpf.h b/tools/lib/bpf/usdt.bpf.h
index fdfd235e52c4..0bd4c135acc2 100644
--- a/tools/lib/bpf/usdt.bpf.h
+++ b/tools/lib/bpf/usdt.bpf.h
@@ -130,7 +130,10 @@ int bpf_usdt_arg(struct pt_regs *ctx, __u64 arg_num, long *res)
if (!spec)
return -ESRCH;
- if (arg_num >= BPF_USDT_MAX_ARG_CNT || arg_num >= spec->arg_cnt)
+ if (arg_num >= BPF_USDT_MAX_ARG_CNT)
+ return -ENOENT;
+ barrier_var(arg_num);
+ if (arg_num >= spec->arg_cnt)
return -ENOENT;
arg_spec = &spec->args[arg_num];
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 4aa5bba956ff..116fecf80ca1 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -48,3 +48,4 @@ xskxceiver
xdp_redirect_multi
xdp_synproxy
xdp_hw_metadata
+xdp_features
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
index 50924611e5bb..b89eb87034e4 100644
--- a/tools/testing/selftests/bpf/DENYLIST.s390x
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -1,93 +1,24 @@
# TEMPORARY
# Alphabetical order
-atomics # attach(add): actual -524 <= expected 0 (trampoline)
bloom_filter_map # failed to find kernel BTF type ID of '__x64_sys_getpgid': -3 (?)
bpf_cookie # failed to open_and_load program: -524 (trampoline)
-bpf_iter_setsockopt # JIT does not support calling kernel function (kfunc)
bpf_loop # attaches to __x64_sys_nanosleep
-bpf_mod_race # BPF trampoline
-bpf_nf # JIT does not support calling kernel function
-bpf_tcp_ca # JIT does not support calling kernel function (kfunc)
-cb_refs # expected error message unexpected error: -524 (trampoline)
-cgroup_hierarchical_stats # JIT does not support calling kernel function (kfunc)
-cgrp_kfunc # JIT does not support calling kernel function
cgrp_local_storage # prog_attach unexpected error: -524 (trampoline)
-core_read_macros # unknown func bpf_probe_read#4 (overlapping)
-cpumask # JIT does not support calling kernel function
-d_path # failed to auto-attach program 'prog_stat': -524 (trampoline)
-decap_sanity # JIT does not support calling kernel function (kfunc)
-deny_namespace # failed to attach: ERROR: strerror_r(-524)=22 (trampoline)
-dummy_st_ops # test_run unexpected error: -524 (errno 524) (trampoline)
-fentry_fexit # fentry attach failed: -524 (trampoline)
-fentry_test # fentry_first_attach unexpected error: -524 (trampoline)
-fexit_bpf2bpf # freplace_attach_trace unexpected error: -524 (trampoline)
fexit_sleep # fexit_skel_load fexit skeleton failed (trampoline)
-fexit_stress # fexit attach failed prog 0 failed: -524 (trampoline)
-fexit_test # fexit_first_attach unexpected error: -524 (trampoline)
-get_func_args_test # trampoline
-get_func_ip_test # get_func_ip_test__attach unexpected error: -524 (trampoline)
get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
-htab_update # failed to attach: ERROR: strerror_r(-524)=22 (trampoline)
-jit_probe_mem # jit_probe_mem__open_and_load unexpected error: -524 (kfunc)
-kfree_skb # attach fentry unexpected error: -524 (trampoline)
-kfunc_call # 'bpf_prog_active': not found in kernel BTF (?)
-kfunc_dynptr_param # JIT does not support calling kernel function (kfunc)
kprobe_multi_bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95
kprobe_multi_test # relies on fentry
ksyms_module # test_ksyms_module__open_and_load unexpected error: -9 (?)
ksyms_module_libbpf # JIT does not support calling kernel function (kfunc)
ksyms_module_lskel # test_ksyms_module_lskel__open_and_load unexpected error: -9 (?)
-libbpf_get_fd_by_id_opts # failed to attach: ERROR: strerror_r(-524)=22 (trampoline)
-linked_list # JIT does not support calling kernel function (kfunc)
-lookup_key # JIT does not support calling kernel function (kfunc)
-lru_bug # prog 'printk': failed to auto-attach: -524
-map_kptr # failed to open_and_load program: -524 (trampoline)
-modify_return # modify_return attach failed: -524 (trampoline)
module_attach # skel_attach skeleton attach failed: -524 (trampoline)
-mptcp
-nested_trust # JIT does not support calling kernel function
-netcnt # failed to load BPF skeleton 'netcnt_prog': -7 (?)
-probe_user # check_kprobe_res wrong kprobe res from probe read (?)
-rcu_read_lock # failed to find kernel BTF type ID of '__x64_sys_getpgid': -3 (?)
-recursion # skel_attach unexpected error: -524 (trampoline)
ringbuf # skel_load skeleton load failed (?)
-select_reuseport # intermittently fails on new s390x setup
-send_signal # intermittently fails to receive signal
-setget_sockopt # attach unexpected error: -524 (trampoline)
-sk_assign # Can't read on server: Invalid argument (?)
-sk_lookup # endianness problem
-sk_storage_tracing # test_sk_storage_tracing__attach unexpected error: -524 (trampoline)
-skc_to_unix_sock # could not attach BPF object unexpected error: -524 (trampoline)
-socket_cookie # prog_attach unexpected error: -524 (trampoline)
stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?)
-tailcalls # tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls (?)
-task_kfunc # JIT does not support calling kernel function
-task_local_storage # failed to auto-attach program 'trace_exit_creds': -524 (trampoline)
-test_bpffs # bpffs test failed 255 (iterator)
-test_bprm_opts # failed to auto-attach program 'secure_exec': -524 (trampoline)
-test_ima # failed to auto-attach program 'ima': -524 (trampoline)
-test_local_storage # failed to auto-attach program 'unlink_hook': -524 (trampoline)
test_lsm # attach unexpected error: -524 (trampoline)
-test_overhead # attach_fentry unexpected error: -524 (trampoline)
-test_profiler # unknown func bpf_probe_read_str#45 (overlapping)
-timer # failed to auto-attach program 'test1': -524 (trampoline)
-timer_crash # trampoline
-timer_mim # failed to auto-attach program 'test1': -524 (trampoline)
-trace_ext # failed to auto-attach program 'test_pkt_md_access_new': -524 (trampoline)
trace_printk # trace_printk__load unexpected error: -2 (errno 2) (?)
trace_vprintk # trace_vprintk__open_and_load unexpected error: -9 (?)
-tracing_struct # failed to auto-attach: -524 (trampoline)
-trampoline_count # prog 'prog1': failed to attach: ERROR: strerror_r(-524)=22 (trampoline)
-type_cast # JIT does not support calling kernel function
unpriv_bpf_disabled # fentry
user_ringbuf # failed to find kernel BTF type ID of '__s390x_sys_prctl': -3 (?)
verif_stats # trace_vprintk__open_and_load unexpected error: -9 (?)
-verify_pkcs7_sig # JIT does not support calling kernel function (kfunc)
-vmlinux # failed to auto-attach program 'handle__fentry': -524 (trampoline)
-xdp_adjust_tail # case-128 err 0 errno 28 retval 1 size 128 expect-size 3520 (?)
xdp_bonding # failed to auto-attach program 'trace_on_entry': -524 (trampoline)
-xdp_bpf2bpf # failed to auto-attach program 'trace_on_entry': -524 (trampoline)
-xdp_do_redirect # prog_run_max_size unexpected error: -22 (errno 22)
xdp_metadata # JIT does not support calling kernel function (kfunc)
-xdp_synproxy # JIT does not support calling kernel function (kfunc)
-xfrm_info # JIT does not support calling kernel function (kfunc)
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 53eae7be8dff..c4b5c44cdee2 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -22,10 +22,11 @@ endif
BPF_GCC ?= $(shell command -v bpf-gcc;)
SAN_CFLAGS ?=
+SAN_LDFLAGS ?= $(SAN_CFLAGS)
CFLAGS += -g -O0 -rdynamic -Wall -Werror $(GENFLAGS) $(SAN_CFLAGS) \
-I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \
-I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT)
-LDFLAGS += $(SAN_CFLAGS)
+LDFLAGS += $(SAN_LDFLAGS)
LDLIBS += -lelf -lz -lrt -lpthread
# Silence some warnings when compiled with clang
@@ -73,7 +74,8 @@ TEST_PROGS := test_kmod.sh \
test_bpftool.sh \
test_bpftool_metadata.sh \
test_doc_build.sh \
- test_xsk.sh
+ test_xsk.sh \
+ test_xdp_features.sh
TEST_PROGS_EXTENDED := with_addr.sh \
with_tunnels.sh ima_setup.sh verify_sig_setup.sh \
@@ -83,7 +85,8 @@ TEST_PROGS_EXTENDED := with_addr.sh \
TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \
flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \
- xskxceiver xdp_redirect_multi xdp_synproxy veristat xdp_hw_metadata
+ xskxceiver xdp_redirect_multi xdp_synproxy veristat xdp_hw_metadata \
+ xdp_features
TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read $(OUTPUT)/sign-file
TEST_GEN_FILES += liburandom_read.so
@@ -189,7 +192,7 @@ $(OUTPUT)/liburandom_read.so: urandom_read_lib1.c urandom_read_lib2.c
$(OUTPUT)/urandom_read: urandom_read.c urandom_read_aux.c $(OUTPUT)/liburandom_read.so
$(call msg,BINARY,,$@)
$(Q)$(CLANG) $(filter-out -static,$(CFLAGS) $(LDFLAGS)) $(filter %.c,$^) \
- liburandom_read.so $(filter-out -static,$(LDLIBS)) \
+ -lurandom_read $(filter-out -static,$(LDLIBS)) -L$(OUTPUT) \
-fuse-ld=$(LLD) -Wl,-znoseparate-code -Wl,--build-id=sha1 \
-Wl,-rpath=. -o $@
@@ -212,7 +215,9 @@ $(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT)
OUTPUT=$(RUNQSLOWER_OUTPUT) VMLINUX_BTF=$(VMLINUX_BTF) \
BPFTOOL_OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
BPFOBJ_OUTPUT=$(BUILD_DIR)/libbpf \
- BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) && \
+ BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) \
+ EXTRA_CFLAGS='-g -O0 $(SAN_CFLAGS)' \
+ EXTRA_LDFLAGS='$(SAN_LDFLAGS)' && \
cp $(RUNQSLOWER_OUTPUT)runqslower $@
TEST_GEN_PROGS_EXTENDED += $(DEFAULT_BPFTOOL)
@@ -246,7 +251,7 @@ BPFTOOL ?= $(DEFAULT_BPFTOOL)
$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
$(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/bpftool
$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
- ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD) \
+ ARCH= CROSS_COMPILE= CC="$(HOSTCC)" LD="$(HOSTLD)" \
EXTRA_CFLAGS='-g -O0' \
OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
LIBBPF_OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \
@@ -269,7 +274,8 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
$(APIDIR)/linux/bpf.h \
| $(BUILD_DIR)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \
- EXTRA_CFLAGS='-g -O0' \
+ EXTRA_CFLAGS='-g -O0 $(SAN_CFLAGS)' \
+ EXTRA_LDFLAGS='$(SAN_LDFLAGS)' \
DESTDIR=$(SCRATCH_DIR) prefix= all install_headers
ifneq ($(BPFOBJ),$(HOST_BPFOBJ))
@@ -278,7 +284,8 @@ $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
| $(HOST_BUILD_DIR)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) \
EXTRA_CFLAGS='-g -O0' ARCH= CROSS_COMPILE= \
- OUTPUT=$(HOST_BUILD_DIR)/libbpf/ CC=$(HOSTCC) LD=$(HOSTLD) \
+ OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \
+ CC="$(HOSTCC)" LD="$(HOSTLD)" \
DESTDIR=$(HOST_SCRATCH_DIR)/ prefix= all install_headers
endif
@@ -299,7 +306,7 @@ $(RESOLVE_BTFIDS): $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/resolve_btfids \
$(TOOLSDIR)/lib/ctype.c \
$(TOOLSDIR)/lib/str_error_r.c
$(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/resolve_btfids \
- CC=$(HOSTCC) LD=$(HOSTLD) AR=$(HOSTAR) \
+ CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)" \
LIBBPF_INCLUDE=$(HOST_INCLUDE_DIR) \
OUTPUT=$(HOST_BUILD_DIR)/resolve_btfids/ BPFOBJ=$(HOST_BPFOBJ)
@@ -385,6 +392,7 @@ test_subskeleton_lib.skel.h-deps := test_subskeleton_lib2.bpf.o test_subskeleton
test_usdt.skel.h-deps := test_usdt.bpf.o test_usdt_multispec.bpf.o
xsk_xdp_progs.skel.h-deps := xsk_xdp_progs.bpf.o
xdp_hw_metadata.skel.h-deps := xdp_hw_metadata.bpf.o
+xdp_features.skel.h-deps := xdp_features.bpf.o
LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.c,$(foreach skel,$(LINKED_SKELS),$($(skel)-deps)))
@@ -519,7 +527,8 @@ $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
$$(call msg,BINARY,,$$@)
$(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
$(Q)$(RESOLVE_BTFIDS) --btf $(TRUNNER_OUTPUT)/btf_data.bpf.o $$@
- $(Q)ln -sf $(if $2,..,.)/tools/build/bpftool/bootstrap/bpftool $(if $2,$2/)bpftool
+ $(Q)ln -sf $(if $2,..,.)/tools/build/bpftool/bootstrap/bpftool \
+ $(OUTPUT)/$(if $2,$2/)bpftool
endef
@@ -586,6 +595,10 @@ $(OUTPUT)/xdp_hw_metadata: xdp_hw_metadata.c $(OUTPUT)/network_helpers.o $(OUTPU
$(call msg,BINARY,,$@)
$(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
+$(OUTPUT)/xdp_features: xdp_features.c $(OUTPUT)/network_helpers.o $(OUTPUT)/xdp_features.skel.h | $(OUTPUT)
+ $(call msg,BINARY,,$@)
+ $(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
+
# Make sure we are able to include and link libbpf against c++.
$(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ)
$(call msg,CXX,,$@)
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 5085fea3cac5..46500636d8cd 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -59,7 +59,7 @@ bpf_testmod_test_struct_arg_5(void) {
return bpf_testmod_test_struct_arg_result;
}
-noinline void
+__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
diff --git a/tools/testing/selftests/bpf/netcnt_common.h b/tools/testing/selftests/bpf/netcnt_common.h
index 0ab1c88041cd..2d4a58e4e39c 100644
--- a/tools/testing/selftests/bpf/netcnt_common.h
+++ b/tools/testing/selftests/bpf/netcnt_common.h
@@ -8,11 +8,11 @@
/* sizeof(struct bpf_local_storage_elem):
*
- * It really is about 128 bytes on x86_64, but allocate more to account for
- * possible layout changes, different architectures, etc.
+ * It is about 128 bytes on x86_64 and 512 bytes on s390x, but allocate more to
+ * account for possible layout changes, different architectures, etc.
* The kernel will wrap up to PAGE_SIZE internally anyway.
*/
-#define SIZEOF_BPF_LOCAL_STORAGE_ELEM 256
+#define SIZEOF_BPF_LOCAL_STORAGE_ELEM 768
/* Try to estimate kernel's BPF_LOCAL_STORAGE_MAX_VALUE_SIZE: */
#define BPF_LOCAL_STORAGE_MAX_VALUE_SIZE (0xFFFF - \
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index 9566d9d2f6ee..56374c8b5436 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -33,8 +33,8 @@ void test_attach_probe(void)
struct test_attach_probe* skel;
ssize_t uprobe_offset, ref_ctr_offset;
struct bpf_link *uprobe_err_link;
+ FILE *devnull;
bool legacy;
- char *mem;
/* Check if new-style kprobe/uprobe API is supported.
* Kernels that support new FD-based kprobe and uprobe BPF attachment
@@ -147,7 +147,7 @@ void test_attach_probe(void)
/* test attach by name for a library function, using the library
* as the binary argument. libc.so.6 will be resolved via dlopen()/dlinfo().
*/
- uprobe_opts.func_name = "malloc";
+ uprobe_opts.func_name = "fopen";
uprobe_opts.retprobe = false;
skel->links.handle_uprobe_byname2 =
bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname2,
@@ -157,7 +157,7 @@ void test_attach_probe(void)
if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname2, "attach_uprobe_byname2"))
goto cleanup;
- uprobe_opts.func_name = "free";
+ uprobe_opts.func_name = "fclose";
uprobe_opts.retprobe = true;
skel->links.handle_uretprobe_byname2 =
bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_byname2,
@@ -195,8 +195,8 @@ void test_attach_probe(void)
usleep(1);
/* trigger & validate shared library u[ret]probes attached by name */
- mem = malloc(1);
- free(mem);
+ devnull = fopen("/dev/null", "r");
+ fclose(devnull);
/* trigger & validate uprobe & uretprobe */
trigger_func();
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
index 2be2d61954bc..26b2d1bffdfd 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
@@ -472,6 +472,7 @@ static void lsm_subtest(struct test_bpf_cookie *skel)
int prog_fd;
int lsm_fd = -1;
LIBBPF_OPTS(bpf_link_create_opts, link_opts);
+ int err;
skel->bss->lsm_res = 0;
@@ -482,8 +483,9 @@ static void lsm_subtest(struct test_bpf_cookie *skel)
if (!ASSERT_GE(lsm_fd, 0, "lsm.link_create"))
goto cleanup;
- stack_mprotect();
- if (!ASSERT_EQ(errno, EPERM, "stack_mprotect"))
+ err = stack_mprotect();
+ if (!ASSERT_EQ(err, -1, "stack_mprotect") ||
+ !ASSERT_EQ(errno, EPERM, "stack_mprotect"))
goto cleanup;
usleep(1);
diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
index 33a2776737e7..2cc759956e3b 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
@@ -16,7 +16,7 @@
struct socket_cookie {
__u64 cookie_key;
- __u32 cookie_value;
+ __u64 cookie_value;
};
static void test_tp_btf(int cgroup_fd)
diff --git a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
index 0b2f73b88c53..2853883b7cbb 100644
--- a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
+++ b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
@@ -80,6 +80,6 @@ fail:
bpf_tc_hook_destroy(&qdisc_hook);
close_netns(nstoken);
}
- system("ip netns del " NS_TEST " >& /dev/null");
+ system("ip netns del " NS_TEST " &> /dev/null");
decap_sanity__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
index 5a7e6011f6bf..596536def43d 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
@@ -2,14 +2,19 @@
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
-/* that's kernel internal BPF_MAX_TRAMP_PROGS define */
-#define CNT 38
-
void serial_test_fexit_stress(void)
{
- int fexit_fd[CNT] = {};
- int link_fd[CNT] = {};
- int err, i;
+ int bpf_max_tramp_links, err, i;
+ int *fd, *fexit_fd, *link_fd;
+
+ bpf_max_tramp_links = get_bpf_max_tramp_links();
+ if (!ASSERT_GE(bpf_max_tramp_links, 1, "bpf_max_tramp_links"))
+ return;
+ fd = calloc(bpf_max_tramp_links * 2, sizeof(*fd));
+ if (!ASSERT_OK_PTR(fd, "fd"))
+ return;
+ fexit_fd = fd;
+ link_fd = fd + bpf_max_tramp_links;
const struct bpf_insn trace_program[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -28,7 +33,7 @@ void serial_test_fexit_stress(void)
goto out;
trace_opts.attach_btf_id = err;
- for (i = 0; i < CNT; i++) {
+ for (i = 0; i < bpf_max_tramp_links; i++) {
fexit_fd[i] = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL",
trace_program,
sizeof(trace_program) / sizeof(struct bpf_insn),
@@ -44,10 +49,11 @@ void serial_test_fexit_stress(void)
ASSERT_OK(err, "bpf_prog_test_run_opts");
out:
- for (i = 0; i < CNT; i++) {
+ for (i = 0; i < bpf_max_tramp_links; i++) {
if (link_fd[i])
close(link_fd[i]);
if (fexit_fd[i])
close(fexit_fd[i]);
}
+ free(fd);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
index 73579370bfbd..c07991544a78 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
@@ -36,7 +36,7 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size)
"cb32_0 %x != %x\n",
meta->cb32_0, cb.cb32[0]))
return;
- if (CHECK(pkt_v6->eth.h_proto != 0xdd86, "check_eth",
+ if (CHECK(pkt_v6->eth.h_proto != htons(ETH_P_IPV6), "check_eth",
"h_proto %x\n", pkt_v6->eth.h_proto))
return;
if (CHECK(pkt_v6->iph.nexthdr != 6, "check_ip",
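
The kfree_skb check replaces the hardcoded 0xdd86 with htons(ETH_P_IPV6): 0xdd86 is just ETH_P_IPV6 (0x86DD) with its bytes swapped, which only matches on little-endian hosts. The portable comparison, sketched:

    #include <arpa/inet.h>
    #include <linux/if_ether.h>

    /* eth->h_proto is carried in network byte order */
    static int is_ipv6(const struct ethhdr *eth)
    {
            return eth->h_proto == htons(ETH_P_IPV6);   /* 0x86DD */
    }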
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
index 5af1ee8f0e6e..a543742cd7bd 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
@@ -72,10 +72,12 @@ static struct kfunc_test_params kfunc_tests[] = {
/* success cases */
TC_TEST(kfunc_call_test1, 12),
TC_TEST(kfunc_call_test2, 3),
+ TC_TEST(kfunc_call_test4, -1234),
TC_TEST(kfunc_call_test_ref_btf_id, 0),
TC_TEST(kfunc_call_test_get_mem, 42),
SYSCALL_TEST(kfunc_syscall_test, 0),
SYSCALL_NULL_CTX_TEST(kfunc_syscall_test_null, 0),
+ TC_TEST(kfunc_call_test_static_unused_arg, 0),
};
struct syscall_test_args {
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
index 3e190ed63976..1374b626a985 100644
--- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c
+++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
@@ -29,7 +29,23 @@ static int stop, duration;
static bool
configure_stack(void)
{
+ char tc_version[128];
char tc_cmd[BUFSIZ];
+ char *prog;
+ FILE *tc;
+
+ /* Check whether tc is built with libbpf. */
+ tc = popen("tc -V", "r");
+ if (CHECK_FAIL(!tc))
+ return false;
+ if (CHECK_FAIL(!fgets(tc_version, sizeof(tc_version), tc))) {
+ pclose(tc);
+ return false;
+ }
+ if (strstr(tc_version, ", libbpf "))
+ prog = "test_sk_assign_libbpf.bpf.o";
+ else
+ prog = "test_sk_assign.bpf.o";
+ if (CHECK_FAIL(pclose(tc)))
+ return false;
/* Move to a new networking namespace */
if (CHECK_FAIL(unshare(CLONE_NEWNET)))
@@ -46,8 +62,8 @@ configure_stack(void)
/* Load qdisc, BPF program */
if (CHECK_FAIL(system("tc qdisc add dev lo clsact")))
return false;
- sprintf(tc_cmd, "%s %s %s %s", "tc filter add dev lo ingress bpf",
- "direct-action object-file ./test_sk_assign.bpf.o",
+ sprintf(tc_cmd, "%s %s %s %s %s", "tc filter add dev lo ingress bpf",
+ "direct-action object-file", prog,
"section tc",
(env.verbosity < VERBOSE_VERY) ? " 2>/dev/null" : "verbose");
if (CHECK(system(tc_cmd), "BPF load failed;",
@@ -129,15 +145,12 @@ get_port(int fd)
static ssize_t
rcv_msg(int srv_client, int type)
{
- struct sockaddr_storage ss;
char buf[BUFSIZ];
- socklen_t slen;
if (type == SOCK_STREAM)
return read(srv_client, &buf, sizeof(buf));
else
- return recvfrom(srv_client, &buf, sizeof(buf), 0,
- (struct sockaddr *)&ss, &slen);
+ return recvfrom(srv_client, &buf, sizeof(buf), 0, NULL, NULL);
}
static int
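
rcv_msg() drops the sockaddr_storage/socklen_t pair: recvfrom(2) accepts NULL for both src_addr and addrlen when the sender's address is not wanted, and the old code passed slen uninitialized, making the result unpredictable. Sketch:

    #include <sys/socket.h>

    /* Read one datagram, ignoring the sender's address entirely. */
    static ssize_t recv_ignore_src(int fd, void *buf, size_t len)
    {
            return recvfrom(fd, buf, len, 0, NULL, NULL);
    }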
diff --git a/tools/testing/selftests/bpf/prog_tests/test_lsm.c b/tools/testing/selftests/bpf/prog_tests/test_lsm.c
index 244c01125126..16175d579bc7 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_lsm.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_lsm.c
@@ -75,7 +75,8 @@ static int test_lsm(struct lsm *skel)
skel->bss->monitored_pid = getpid();
err = stack_mprotect();
- if (!ASSERT_EQ(errno, EPERM, "stack_mprotect"))
+ if (!ASSERT_EQ(err, -1, "stack_mprotect") ||
+ !ASSERT_EQ(errno, EPERM, "stack_mprotect"))
return err;
ASSERT_EQ(skel->bss->mprotect_count, 1, "mprotect_count");
diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
index 564b75bc087f..8fd4c0d78089 100644
--- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
+++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
@@ -2,8 +2,6 @@
#define _GNU_SOURCE
#include <test_progs.h>
-#define MAX_TRAMP_PROGS 38
-
struct inst {
struct bpf_object *obj;
struct bpf_link *link;
@@ -37,14 +35,21 @@ void serial_test_trampoline_count(void)
{
char *file = "test_trampoline_count.bpf.o";
char *const progs[] = { "fentry_test", "fmod_ret_test", "fexit_test" };
- struct inst inst[MAX_TRAMP_PROGS + 1] = {};
+ int bpf_max_tramp_links, err, i, prog_fd;
struct bpf_program *prog;
struct bpf_link *link;
- int prog_fd, err, i;
+ struct inst *inst;
LIBBPF_OPTS(bpf_test_run_opts, opts);
+ bpf_max_tramp_links = get_bpf_max_tramp_links();
+ if (!ASSERT_GE(bpf_max_tramp_links, 1, "bpf_max_tramp_links"))
+ return;
+ inst = calloc(bpf_max_tramp_links + 1, sizeof(*inst));
+ if (!ASSERT_OK_PTR(inst, "inst"))
+ return;
+
/* attach 'allowed' trampoline programs */
- for (i = 0; i < MAX_TRAMP_PROGS; i++) {
+ for (i = 0; i < bpf_max_tramp_links; i++) {
prog = load_prog(file, progs[i % ARRAY_SIZE(progs)], &inst[i]);
if (!prog)
goto cleanup;
@@ -91,4 +96,5 @@ cleanup:
bpf_link__destroy(inst[i].link);
bpf_object__close(inst[i].obj);
}
+ free(inst);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c
index 82807def0d24..6558c857e620 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c
@@ -16,10 +16,10 @@ static noinline int autoattach_trigger_func(int arg1, int arg2, int arg3,
void test_uprobe_autoattach(void)
{
+ const char *devnull_str = "/dev/null";
struct test_uprobe_autoattach *skel;
int trigger_ret;
- size_t malloc_sz = 1;
- char *mem;
+ FILE *devnull;
skel = test_uprobe_autoattach__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
@@ -36,16 +36,18 @@ void test_uprobe_autoattach(void)
skel->bss->test_pid = getpid();
/* trigger & validate shared library u[ret]probes attached by name */
- mem = malloc(malloc_sz);
+ devnull = fopen(devnull_str, "r");
ASSERT_EQ(skel->bss->uprobe_byname_parm1, 1, "check_uprobe_byname_parm1");
ASSERT_EQ(skel->bss->uprobe_byname_ran, 1, "check_uprobe_byname_ran");
ASSERT_EQ(skel->bss->uretprobe_byname_rc, trigger_ret, "check_uretprobe_byname_rc");
ASSERT_EQ(skel->bss->uretprobe_byname_ret, trigger_ret, "check_uretprobe_byname_ret");
ASSERT_EQ(skel->bss->uretprobe_byname_ran, 2, "check_uretprobe_byname_ran");
- ASSERT_EQ(skel->bss->uprobe_byname2_parm1, malloc_sz, "check_uprobe_byname2_parm1");
+ ASSERT_EQ(skel->bss->uprobe_byname2_parm1, (__u64)(long)devnull_str,
+ "check_uprobe_byname2_parm1");
ASSERT_EQ(skel->bss->uprobe_byname2_ran, 3, "check_uprobe_byname2_ran");
- ASSERT_EQ(skel->bss->uretprobe_byname2_rc, mem, "check_uretprobe_byname2_rc");
+ ASSERT_EQ(skel->bss->uretprobe_byname2_rc, (__u64)(long)devnull,
+ "check_uretprobe_byname2_rc");
ASSERT_EQ(skel->bss->uretprobe_byname2_ran, 4, "check_uretprobe_byname2_ran");
ASSERT_EQ(skel->bss->a[0], 1, "arg1");
@@ -67,7 +69,7 @@ void test_uprobe_autoattach(void)
ASSERT_EQ(skel->bss->a[7], 8, "arg8");
#endif
- free(mem);
+ fclose(devnull);
cleanup:
test_uprobe_autoattach__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/usdt.c b/tools/testing/selftests/bpf/prog_tests/usdt.c
index 9ad9da0f215e..56ed1eb9b527 100644
--- a/tools/testing/selftests/bpf/prog_tests/usdt.c
+++ b/tools/testing/selftests/bpf/prog_tests/usdt.c
@@ -314,6 +314,7 @@ static FILE *urand_spawn(int *pid)
if (fscanf(f, "%d", pid) != 1) {
pclose(f);
+ errno = EINVAL;
return NULL;
}
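
urand_spawn() now sets errno = EINVAL before its NULL return on the parse failure; without it the caller would see whatever errno the preceding pclose() left behind. The convention, sketched:

    #include <errno.h>
    #include <stdio.h>

    /* Returns a stream or NULL; on NULL, errno describes why. */
    static FILE *spawn_and_read_pid(const char *cmd, int *pid)
    {
            FILE *f = popen(cmd, "r");

            if (!f)
                    return NULL;        /* popen() set errno */
            if (fscanf(f, "%d", pid) != 1) {
                    pclose(f);
                    errno = EINVAL;     /* parse failure, make it visible */
                    return NULL;
            }
            return f;
    }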
diff --git a/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c b/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
index 579d6ee83ce0..dd7f2bc70048 100644
--- a/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
+++ b/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
@@ -61,6 +61,9 @@ static bool kfunc_not_supported;
static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt,
va_list args)
{
+ if (level == LIBBPF_WARN)
+ vprintf(fmt, args);
+
if (strcmp(fmt, "libbpf: extern (func ksym) '%s': not found in kernel or module BTFs\n"))
return 0;
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
index 39973ea1ce43..f09505f8b038 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
@@ -76,10 +76,15 @@ static void test_xdp_adjust_tail_grow2(void)
{
const char *file = "./test_xdp_adjust_tail_grow.bpf.o";
char buf[4096]; /* avoid segfault: large buf to hold grow results */
- int tailroom = 320; /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info))*/;
struct bpf_object *obj;
int err, cnt, i;
int max_grow, prog_fd;
+ /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
+#if defined(__s390x__)
+ int tailroom = 512;
+#else
+ int tailroom = 320;
+#endif
LIBBPF_OPTS(bpf_test_run_opts, tattr,
.repeat = 1,
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
index a50971c6cf4a..2666c84dbd01 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
@@ -4,10 +4,12 @@
#include <net/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
+#include <linux/if_link.h>
#include <linux/ipv6.h>
#include <linux/in6.h>
#include <linux/udp.h>
#include <bpf/bpf_endian.h>
+#include <uapi/linux/netdev.h>
#include "test_xdp_do_redirect.skel.h"
#define SYS(fmt, ...) \
@@ -65,7 +67,11 @@ static int attach_tc_prog(struct bpf_tc_hook *hook, int fd)
/* The maximum permissible size is: PAGE_SIZE - sizeof(struct xdp_page_head) -
* sizeof(struct skb_shared_info) - XDP_PACKET_HEADROOM = 3368 bytes
*/
+#if defined(__s390x__)
+#define MAX_PKT_SIZE 3176
+#else
#define MAX_PKT_SIZE 3368
+#endif
static void test_max_pkt_size(int fd)
{
char data[MAX_PKT_SIZE + 1] = {};
@@ -92,7 +98,7 @@ void test_xdp_do_redirect(void)
struct test_xdp_do_redirect *skel = NULL;
struct nstoken *nstoken = NULL;
struct bpf_link *link;
-
+ LIBBPF_OPTS(bpf_xdp_query_opts, query_opts);
struct xdp_md ctx_in = { .data = sizeof(__u32),
.data_end = sizeof(data) };
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
@@ -153,6 +159,29 @@ void test_xdp_do_redirect(void)
!ASSERT_NEQ(ifindex_dst, 0, "ifindex_dst"))
goto out;
+ /* Check xdp features supported by veth driver */
+ err = bpf_xdp_query(ifindex_src, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "veth_src bpf_xdp_query"))
+ goto out;
+
+ if (!ASSERT_EQ(query_opts.feature_flags,
+ NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG,
+ "veth_src query_opts.feature_flags"))
+ goto out;
+
+ err = bpf_xdp_query(ifindex_dst, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "veth_dst bpf_xdp_query"))
+ goto out;
+
+ if (!ASSERT_EQ(query_opts.feature_flags,
+ NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG,
+ "veth_dst query_opts.feature_flags"))
+ goto out;
+
memcpy(skel->rodata->expect_dst, &pkt_udp.eth.h_dest, ETH_ALEN);
skel->rodata->ifindex_out = ifindex_src; /* redirect back to the same iface */
skel->rodata->ifindex_in = ifindex_src;
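
The two MAX_PKT_SIZE values follow the formula in the comment above; the 192-byte gap between x86-64 and s390x is SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) growing from 320 to 512 once SMP_CACHE_BYTES is 256. Worked out (the xdp_page_head size is inferred from the stated total, not from the patch):

    /* PAGE_SIZE - sizeof(struct xdp_page_head)
     *           - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
     *           - XDP_PACKET_HEADROOM
     *
     * x86-64: 4096 - 152 - 320 - 256 = 3368
     * s390x:  4096 - 152 - 512 - 256 = 3176
     */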
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_info.c b/tools/testing/selftests/bpf/prog_tests/xdp_info.c
index cd3aa340e65e..286c21ecdc65 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_info.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_info.c
@@ -8,6 +8,7 @@ void serial_test_xdp_info(void)
{
__u32 len = sizeof(struct bpf_prog_info), duration = 0, prog_id;
const char *file = "./xdp_dummy.bpf.o";
+ LIBBPF_OPTS(bpf_xdp_query_opts, opts);
struct bpf_prog_info info = {};
struct bpf_object *obj;
int err, prog_fd;
@@ -61,6 +62,13 @@ void serial_test_xdp_info(void)
if (CHECK(prog_id, "prog_id_drv", "unexpected prog_id=%u\n", prog_id))
goto out;
+ /* Check xdp features supported by lo device */
+ opts.feature_flags = ~0;
+ err = bpf_xdp_query(IFINDEX_LO, XDP_FLAGS_DRV_MODE, &opts);
+ if (!ASSERT_OK(err, "bpf_xdp_query"))
+ goto out;
+
+ ASSERT_EQ(opts.feature_flags, 0, "opts.feature_flags");
out:
bpf_xdp_detach(IFINDEX_LO, 0, NULL);
out_close:
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
index e033d48288c0..aa4beae99f4f 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
@@ -121,7 +121,7 @@ static void close_xsk(struct xsk *xsk)
xsk_umem__delete(xsk->umem);
if (xsk->socket)
xsk_socket__delete(xsk->socket);
- munmap(xsk->umem, UMEM_SIZE);
+ munmap(xsk->umem_area, UMEM_SIZE);
}
static void ip_csum(struct iphdr *iph)
@@ -205,9 +205,8 @@ static void complete_tx(struct xsk *xsk)
if (ASSERT_EQ(xsk_ring_cons__peek(&xsk->comp, 1, &idx), 1, "xsk_ring_cons__peek")) {
addr = *xsk_ring_cons__comp_addr(&xsk->comp, idx);
- printf("%p: refill idx=%u addr=%llx\n", xsk, idx, addr);
- *xsk_ring_prod__fill_addr(&xsk->fill, idx) = addr;
- xsk_ring_prod__submit(&xsk->fill, 1);
+ printf("%p: complete tx idx=%u addr=%llx\n", xsk, idx, addr);
+ xsk_ring_cons__release(&xsk->comp, 1);
}
}
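
complete_tx() now consumes the TX completion instead of recycling the address into the fill ring: refilling is the RX path's job, and pushing the address back here left the completion ring entry unreleased. Draining one completion, sketched (assuming an `xsk_ring_cons comp` set up through the usual xsk helpers):

    __u32 idx;
    __u64 addr;

    if (xsk_ring_cons__peek(&comp, 1, &idx) == 1) {
            /* addr names the umem frame whose transmit just finished;
             * hand it back to the fill ring from the RX path, not here */
            addr = *xsk_ring_cons__comp_addr(&comp, idx);
            xsk_ring_cons__release(&comp, 1);
    }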
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
index f636e50be259..7daa8f5720b9 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
@@ -3,6 +3,7 @@
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
+extern long bpf_kfunc_call_test4(signed char a, short b, int c, long d) __ksym;
extern int bpf_kfunc_call_test2(struct sock *sk, __u32 a, __u32 b) __ksym;
extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b,
__u32 c, __u64 d) __ksym;
@@ -16,6 +17,24 @@ extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym;
extern void bpf_kfunc_call_test_mem_len_fail2(__u64 *mem, int len) __ksym;
extern int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym;
extern int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym;
+extern u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused) __ksym;
+
+SEC("tc")
+int kfunc_call_test4(struct __sk_buff *skb)
+{
+ struct bpf_sock *sk = skb->sk;
+ long tmp;
+
+ if (!sk)
+ return -1;
+
+ sk = bpf_sk_fullsock(sk);
+ if (!sk)
+ return -1;
+
+ tmp = bpf_kfunc_call_test4(-3, -30, -200, -1000);
+ return (tmp >> 32) + tmp;
+}
SEC("tc")
int kfunc_call_test2(struct __sk_buff *skb)
@@ -163,4 +182,14 @@ int kfunc_call_test_get_mem(struct __sk_buff *skb)
return ret;
}
+SEC("tc")
+int kfunc_call_test_static_unused_arg(struct __sk_buff *skb)
+{
+
+ u32 expected = 5, actual;
+
+ actual = bpf_kfunc_call_test_static_unused_arg(expected, 0xdeadbeef);
+ return actual != expected ? -1 : 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/lsm.c b/tools/testing/selftests/bpf/progs/lsm.c
index d8d8af623bc2..dc93887ed34c 100644
--- a/tools/testing/selftests/bpf/progs/lsm.c
+++ b/tools/testing/selftests/bpf/progs/lsm.c
@@ -6,9 +6,10 @@
#include "bpf_misc.h"
#include "vmlinux.h"
+#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include  <errno.h>
+#include <errno.h>
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
@@ -164,8 +165,8 @@ int copy_test = 0;
SEC("fentry.s/" SYS_PREFIX "sys_setdomainname")
int BPF_PROG(test_sys_setdomainname, struct pt_regs *regs)
{
- void *ptr = (void *)PT_REGS_PARM1(regs);
- int len = PT_REGS_PARM2(regs);
+ void *ptr = (void *)PT_REGS_PARM1_SYSCALL(regs);
+ int len = PT_REGS_PARM2_SYSCALL(regs);
int buf = 0;
long ret;
diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h
index 92331053dba3..68a3fd7387a4 100644
--- a/tools/testing/selftests/bpf/progs/profiler.inc.h
+++ b/tools/testing/selftests/bpf/progs/profiler.inc.h
@@ -156,10 +156,10 @@ probe_read_lim(void* dst, void* src, unsigned long len, unsigned long max)
{
len = len < max ? len : max;
if (len > 1) {
- if (bpf_probe_read(dst, len, src))
+ if (bpf_probe_read_kernel(dst, len, src))
return 0;
} else if (len == 1) {
- if (bpf_probe_read(dst, 1, src))
+ if (bpf_probe_read_kernel(dst, 1, src))
return 0;
}
return len;
@@ -216,7 +216,8 @@ static INLINE void* read_full_cgroup_path(struct kernfs_node* cgroup_node,
#endif
for (int i = 0; i < MAX_CGROUPS_PATH_DEPTH; i++) {
filepart_length =
- bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(cgroup_node, name));
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(cgroup_node, name));
if (!cgroup_node)
return payload;
if (cgroup_node == cgroup_root_node)
@@ -303,7 +304,8 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
cgroup_data->cgroup_full_length = 0;
size_t cgroup_root_length =
- bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(root_kernfs, name));
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(root_kernfs, name));
barrier_var(cgroup_root_length);
if (cgroup_root_length <= MAX_PATH) {
barrier_var(cgroup_root_length);
@@ -312,7 +314,8 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
}
size_t cgroup_proc_length =
- bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(proc_kernfs, name));
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(proc_kernfs, name));
barrier_var(cgroup_proc_length);
if (cgroup_proc_length <= MAX_PATH) {
barrier_var(cgroup_proc_length);
@@ -395,7 +398,8 @@ static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
arr_struct = bpf_map_lookup_elem(&data_heap, &zero);
if (arr_struct == NULL)
return 0;
- bpf_probe_read(&arr_struct->array[0], sizeof(arr_struct->array[0]), kill_data);
+ bpf_probe_read_kernel(&arr_struct->array[0],
+ sizeof(arr_struct->array[0]), kill_data);
} else {
int index = get_var_spid_index(arr_struct, spid);
@@ -409,8 +413,9 @@ static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
#endif
for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++)
if (arr_struct->array[i].meta.pid == 0) {
- bpf_probe_read(&arr_struct->array[i],
- sizeof(arr_struct->array[i]), kill_data);
+ bpf_probe_read_kernel(&arr_struct->array[i],
+ sizeof(arr_struct->array[i]),
+ kill_data);
bpf_map_update_elem(&var_tpid_to_data, &tpid,
arr_struct, 0);
@@ -427,17 +432,17 @@ static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
if (delta_sec < STALE_INFO) {
kill_data->kill_count++;
kill_data->last_kill_time = bpf_ktime_get_ns();
- bpf_probe_read(&arr_struct->array[index],
- sizeof(arr_struct->array[index]),
- kill_data);
+ bpf_probe_read_kernel(&arr_struct->array[index],
+ sizeof(arr_struct->array[index]),
+ kill_data);
} else {
struct var_kill_data_t* kill_data =
get_var_kill_data(ctx, spid, tpid, sig);
if (kill_data == NULL)
return 0;
- bpf_probe_read(&arr_struct->array[index],
- sizeof(arr_struct->array[index]),
- kill_data);
+ bpf_probe_read_kernel(&arr_struct->array[index],
+ sizeof(arr_struct->array[index]),
+ kill_data);
}
}
bpf_map_update_elem(&var_tpid_to_data, &tpid, arr_struct, 0);
@@ -487,8 +492,9 @@ read_absolute_file_path_from_dentry(struct dentry* filp_dentry, void* payload)
#pragma unroll
#endif
for (int i = 0; i < MAX_PATH_DEPTH; i++) {
- filepart_length = bpf_probe_read_str(payload, MAX_PATH,
- BPF_CORE_READ(filp_dentry, d_name.name));
+ filepart_length =
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(filp_dentry, d_name.name));
barrier_var(filepart_length);
if (filepart_length > MAX_PATH)
break;
@@ -572,7 +578,8 @@ ssize_t BPF_KPROBE(kprobe__proc_sys_write,
sysctl_data->sysctl_val_length = 0;
sysctl_data->sysctl_path_length = 0;
- size_t sysctl_val_length = bpf_probe_read_str(payload, CTL_MAXNAME, buf);
+ size_t sysctl_val_length = bpf_probe_read_kernel_str(payload,
+ CTL_MAXNAME, buf);
barrier_var(sysctl_val_length);
if (sysctl_val_length <= CTL_MAXNAME) {
barrier_var(sysctl_val_length);
@@ -580,8 +587,10 @@ ssize_t BPF_KPROBE(kprobe__proc_sys_write,
payload += sysctl_val_length;
}
- size_t sysctl_path_length = bpf_probe_read_str(payload, MAX_PATH,
- BPF_CORE_READ(filp, f_path.dentry, d_name.name));
+ size_t sysctl_path_length =
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(filp, f_path.dentry,
+ d_name.name));
barrier_var(sysctl_path_length);
if (sysctl_path_length <= MAX_PATH) {
barrier_var(sysctl_path_length);
@@ -638,7 +647,8 @@ int raw_tracepoint__sched_process_exit(void* ctx)
struct var_kill_data_t* past_kill_data = &arr_struct->array[i];
if (past_kill_data != NULL && past_kill_data->kill_target_pid == tpid) {
- bpf_probe_read(kill_data, sizeof(*past_kill_data), past_kill_data);
+ bpf_probe_read_kernel(kill_data, sizeof(*past_kill_data),
+ past_kill_data);
void* payload = kill_data->payload;
size_t offset = kill_data->payload_length;
if (offset >= MAX_METADATA_PAYLOAD_LEN + MAX_CGROUP_PAYLOAD_LEN)
@@ -656,8 +666,10 @@ int raw_tracepoint__sched_process_exit(void* ctx)
payload += comm_length;
}
- size_t cgroup_proc_length = bpf_probe_read_str(payload, KILL_TARGET_LEN,
- BPF_CORE_READ(proc_kernfs, name));
+ size_t cgroup_proc_length =
+ bpf_probe_read_kernel_str(payload,
+ KILL_TARGET_LEN,
+ BPF_CORE_READ(proc_kernfs, name));
barrier_var(cgroup_proc_length);
if (cgroup_proc_length <= KILL_TARGET_LEN) {
barrier_var(cgroup_proc_length);
@@ -718,7 +730,8 @@ int raw_tracepoint__sched_process_exec(struct bpf_raw_tracepoint_args* ctx)
proc_exec_data->parent_start_time = BPF_CORE_READ(parent_task, start_time);
const char* filename = BPF_CORE_READ(bprm, filename);
- size_t bin_path_length = bpf_probe_read_str(payload, MAX_FILENAME_LEN, filename);
+ size_t bin_path_length =
+ bpf_probe_read_kernel_str(payload, MAX_FILENAME_LEN, filename);
barrier_var(bin_path_length);
if (bin_path_length <= MAX_FILENAME_LEN) {
barrier_var(bin_path_length);
@@ -922,7 +935,8 @@ int BPF_KPROBE(kprobe__vfs_symlink, struct inode* dir, struct dentry* dentry,
filemod_data->payload);
payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
- size_t len = bpf_probe_read_str(payload, MAX_FILEPATH_LENGTH, oldname);
+ size_t len = bpf_probe_read_kernel_str(payload, MAX_FILEPATH_LENGTH,
+ oldname);
barrier_var(len);
if (len <= MAX_FILEPATH_LENGTH) {
barrier_var(len);
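
All of profiler.inc.h moves from the deprecated bpf_probe_read()/bpf_probe_read_str() to the explicit _kernel variants; the unqualified helpers do not exist on architectures whose user and kernel address spaces are not interchangeable, s390x among them. The rule of thumb, sketched with illustrative names:

    /* kernel memory: task_struct fields, dentry names, ... */
    bpf_probe_read_kernel(&dst, sizeof(dst), kptr);
    bpf_probe_read_kernel_str(buf, sizeof(buf), kstr);

    /* user memory: syscall argument buffers, ... */
    bpf_probe_read_user(&dst, sizeof(dst), uptr);
    bpf_probe_read_user_str(buf, sizeof(buf), ustr);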
diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe.c b/tools/testing/selftests/bpf/progs/test_attach_probe.c
index a1e45fec8938..3b5dc34d23e9 100644
--- a/tools/testing/selftests/bpf/progs/test_attach_probe.c
+++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c
@@ -92,18 +92,19 @@ int handle_uretprobe_byname(struct pt_regs *ctx)
}
SEC("uprobe")
-int handle_uprobe_byname2(struct pt_regs *ctx)
+int BPF_UPROBE(handle_uprobe_byname2, const char *pathname, const char *mode)
{
- unsigned int size = PT_REGS_PARM1(ctx);
+ char mode_buf[2] = {};
- /* verify malloc size */
- if (size == 1)
+ /* verify fopen mode */
+ bpf_probe_read_user(mode_buf, sizeof(mode_buf), mode);
+ if (mode_buf[0] == 'r' && mode_buf[1] == 0)
uprobe_byname2_res = 7;
return 0;
}
SEC("uretprobe")
-int handle_uretprobe_byname2(struct pt_regs *ctx)
+int BPF_URETPROBE(handle_uretprobe_byname2, void *ret)
{
uretprobe_byname2_res = 8;
return 0;
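
The handlers switch to the BPF_UPROBE/BPF_URETPROBE wrappers, which unpack pt_regs into typed parameters; the mode string is still a user-space pointer, hence the bpf_probe_read_user() before inspecting it. Roughly what the wrapper provides (simplified; the real macros live in bpf_tracing.h):

    SEC("uprobe")
    int handler(struct pt_regs *ctx)
    {
            const char *pathname = (const char *)PT_REGS_PARM1(ctx);
            const char *mode = (const char *)PT_REGS_PARM2(ctx);
            char first = 0;

            bpf_probe_read_user(&first, 1, mode);   /* user pointer */
            return first == 'r';
    }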
diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign.c b/tools/testing/selftests/bpf/progs/test_sk_assign.c
index 98c6493d9b91..21b19b758c4e 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_assign.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_assign.c
@@ -16,6 +16,16 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#if defined(IPROUTE2_HAVE_LIBBPF)
+/* Use a new-style map definition. */
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __type(key, int);
+ __type(value, __u64);
+ __uint(pinning, LIBBPF_PIN_BY_NAME);
+ __uint(max_entries, 1);
+} server_map SEC(".maps");
+#else
/* Pin map under /sys/fs/bpf/tc/globals/<map name> */
#define PIN_GLOBAL_NS 2
@@ -35,6 +45,7 @@ struct {
.max_elem = 1,
.pinning = PIN_GLOBAL_NS,
};
+#endif
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c b/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c
new file mode 100644
index 000000000000..dcf46adfda04
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c
@@ -0,0 +1,3 @@
+// SPDX-License-Identifier: GPL-2.0
+#define IPROUTE2_HAVE_LIBBPF
+#include "test_sk_assign.c"
diff --git a/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c b/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c
index 774ddeb45898..da4bf89d004c 100644
--- a/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c
+++ b/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c
@@ -13,9 +13,9 @@ int uprobe_byname_ran = 0;
int uretprobe_byname_rc = 0;
int uretprobe_byname_ret = 0;
int uretprobe_byname_ran = 0;
-size_t uprobe_byname2_parm1 = 0;
+u64 uprobe_byname2_parm1 = 0;
int uprobe_byname2_ran = 0;
-char *uretprobe_byname2_rc = NULL;
+u64 uretprobe_byname2_rc = 0;
int uretprobe_byname2_ran = 0;
int test_pid;
@@ -88,28 +88,28 @@ int BPF_URETPROBE(handle_uretprobe_byname, int ret)
}
-SEC("uprobe/libc.so.6:malloc")
-int handle_uprobe_byname2(struct pt_regs *ctx)
+SEC("uprobe/libc.so.6:fopen")
+int BPF_UPROBE(handle_uprobe_byname2, const char *pathname, const char *mode)
{
int pid = bpf_get_current_pid_tgid() >> 32;
/* ignore irrelevant invocations */
if (test_pid != pid)
return 0;
- uprobe_byname2_parm1 = PT_REGS_PARM1_CORE(ctx);
+ uprobe_byname2_parm1 = (u64)(long)pathname;
uprobe_byname2_ran = 3;
return 0;
}
-SEC("uretprobe/libc.so.6:malloc")
-int handle_uretprobe_byname2(struct pt_regs *ctx)
+SEC("uretprobe/libc.so.6:fopen")
+int BPF_URETPROBE(handle_uretprobe_byname2, void *ret)
{
int pid = bpf_get_current_pid_tgid() >> 32;
/* ignore irrelevant invocations */
if (test_pid != pid)
return 0;
- uretprobe_byname2_rc = (char *)PT_REGS_RC_CORE(ctx);
+ uretprobe_byname2_rc = (u64)(long)ret;
uretprobe_byname2_ran = 4;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c
index ce419304ff1f..7748cc23de8a 100644
--- a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c
+++ b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c
@@ -59,10 +59,14 @@ int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size)
if (!data_val)
return 0;
- bpf_probe_read(&value, sizeof(value), &attr->value);
-
- bpf_copy_from_user(data_val, sizeof(struct data),
- (void *)(unsigned long)value);
+ ret = bpf_probe_read_kernel(&value, sizeof(value), &attr->value);
+ if (ret)
+ return ret;
+
+ ret = bpf_copy_from_user(data_val, sizeof(struct data),
+ (void *)(unsigned long)value);
+ if (ret)
+ return ret;
if (data_val->data_len > sizeof(data_val->data))
return -EINVAL;
diff --git a/tools/testing/selftests/bpf/progs/test_vmlinux.c b/tools/testing/selftests/bpf/progs/test_vmlinux.c
index e9dfa0313d1b..4b8e37f7fd06 100644
--- a/tools/testing/selftests/bpf/progs/test_vmlinux.c
+++ b/tools/testing/selftests/bpf/progs/test_vmlinux.c
@@ -42,7 +42,7 @@ int BPF_PROG(handle__raw_tp, struct pt_regs *regs, long id)
if (id != __NR_nanosleep)
return 0;
- ts = (void *)PT_REGS_PARM1_CORE(regs);
+ ts = (void *)PT_REGS_PARM1_CORE_SYSCALL(regs);
if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
tv_nsec != MY_TV_NSEC)
return 0;
@@ -60,7 +60,7 @@ int BPF_PROG(handle__tp_btf, struct pt_regs *regs, long id)
if (id != __NR_nanosleep)
return 0;
- ts = (void *)PT_REGS_PARM1_CORE(regs);
+ ts = (void *)PT_REGS_PARM1_CORE_SYSCALL(regs);
if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
tv_nsec != MY_TV_NSEC)
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
index 53b64c999450..297c260fc364 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
@@ -9,6 +9,12 @@ int _xdp_adjust_tail_grow(struct xdp_md *xdp)
void *data = (void *)(long)xdp->data;
int data_len = bpf_xdp_get_buff_len(xdp);
int offset = 0;
+ /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
+#if defined(__TARGET_ARCH_s390)
+ int tailroom = 512;
+#else
+ int tailroom = 320;
+#endif
/* Data length determine test case */
@@ -20,7 +26,7 @@ int _xdp_adjust_tail_grow(struct xdp_md *xdp)
offset = 128;
} else if (data_len == 128) {
/* Max tail grow 3520 */
- offset = 4096 - 256 - 320 - data_len;
+ offset = 4096 - 256 - tailroom - data_len;
} else if (data_len == 9000) {
offset = 10;
} else if (data_len == 9001) {
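
The 128-byte case now derives the grow offset from the arch-dependent tailroom rather than a hardcoded 320. tailroom stands in for the kernel's SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), i.e. the shared-info struct rounded up to the cache line, 64 bytes on most architectures but 256 on s390x. Paraphrased from the kernel headers:

    #define ALIGN(x, a)        (((x) + (a) - 1) & ~((a) - 1))
    #define SKB_DATA_ALIGN(x)  ALIGN(x, SMP_CACHE_BYTES)
    /* SMP_CACHE_BYTES 64  -> SKB_DATA_ALIGN(sizeof(skb_shared_info)) == 320
     * SMP_CACHE_BYTES 256 -> 512 */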
diff --git a/tools/testing/selftests/bpf/progs/xdp_features.c b/tools/testing/selftests/bpf/progs/xdp_features.c
new file mode 100644
index 000000000000..87c247d56f72
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdp_features.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <linux/netdev.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_tracing.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/udp.h>
+#include <asm-generic/errno-base.h>
+
+#include "xdp_features.h"
+
+#define ipv6_addr_equal(a, b) ((a).s6_addr32[0] == (b).s6_addr32[0] && \
+ (a).s6_addr32[1] == (b).s6_addr32[1] && \
+ (a).s6_addr32[2] == (b).s6_addr32[2] && \
+ (a).s6_addr32[3] == (b).s6_addr32[3])
+
+struct net_device;
+struct bpf_prog;
+
+struct xdp_cpumap_stats {
+ unsigned int redirect;
+ unsigned int pass;
+ unsigned int drop;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 1);
+} stats SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 1);
+} dut_stats SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CPUMAP);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_cpumap_val));
+ __uint(max_entries, 1);
+} cpu_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_devmap_val));
+ __uint(max_entries, 1);
+} dev_map SEC(".maps");
+
+const volatile struct in6_addr tester_addr;
+const volatile struct in6_addr dut_addr;
+
+static __always_inline int
+xdp_process_echo_packet(struct xdp_md *xdp, bool dut)
+{
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+ struct ethhdr *eh = data;
+ struct tlv_hdr *tlv;
+ struct udphdr *uh;
+ __be16 port;
+ __u8 *cmd;
+
+ if (eh + 1 > (struct ethhdr *)data_end)
+ return -EINVAL;
+
+ if (eh->h_proto == bpf_htons(ETH_P_IP)) {
+ struct iphdr *ih = (struct iphdr *)(eh + 1);
+ __be32 saddr = dut ? tester_addr.s6_addr32[3]
+ : dut_addr.s6_addr32[3];
+ __be32 daddr = dut ? dut_addr.s6_addr32[3]
+ : tester_addr.s6_addr32[3];
+
+ ih = (struct iphdr *)(eh + 1);
+ if (ih + 1 > (struct iphdr *)data_end)
+ return -EINVAL;
+
+ if (saddr != ih->saddr)
+ return -EINVAL;
+
+ if (daddr != ih->daddr)
+ return -EINVAL;
+
+ if (ih->protocol != IPPROTO_UDP)
+ return -EINVAL;
+
+ uh = (struct udphdr *)(ih + 1);
+ } else if (eh->h_proto == bpf_htons(ETH_P_IPV6)) {
+ struct in6_addr saddr = dut ? tester_addr : dut_addr;
+ struct in6_addr daddr = dut ? dut_addr : tester_addr;
+ struct ipv6hdr *ih6 = (struct ipv6hdr *)(eh + 1);
+
+ if (ih6 + 1 > (struct ipv6hdr *)data_end)
+ return -EINVAL;
+
+ if (!ipv6_addr_equal(saddr, ih6->saddr))
+ return -EINVAL;
+
+ if (!ipv6_addr_equal(daddr, ih6->daddr))
+ return -EINVAL;
+
+ if (ih6->nexthdr != IPPROTO_UDP)
+ return -EINVAL;
+
+ uh = (struct udphdr *)(ih6 + 1);
+ } else {
+ return -EINVAL;
+ }
+
+ if (uh + 1 > (struct udphdr *)data_end)
+ return -EINVAL;
+
+ port = dut ? uh->dest : uh->source;
+ if (port != bpf_htons(DUT_ECHO_PORT))
+ return -EINVAL;
+
+ tlv = (struct tlv_hdr *)(uh + 1);
+ if (tlv + 1 > data_end)
+ return -EINVAL;
+
+ return bpf_htons(tlv->type) == CMD_ECHO ? 0 : -EINVAL;
+}
+
+static __always_inline int
+xdp_update_stats(struct xdp_md *xdp, bool tx, bool dut)
+{
+ __u32 *val, key = 0;
+
+ if (xdp_process_echo_packet(xdp, tx))
+ return -EINVAL;
+
+ if (dut)
+ val = bpf_map_lookup_elem(&dut_stats, &key);
+ else
+ val = bpf_map_lookup_elem(&stats, &key);
+
+ if (val)
+ __sync_add_and_fetch(val, 1);
+
+ return 0;
+}
+
+/* Tester */
+
+SEC("xdp")
+int xdp_tester_check_tx(struct xdp_md *xdp)
+{
+ xdp_update_stats(xdp, true, false);
+
+ return XDP_PASS;
+}
+
+SEC("xdp")
+int xdp_tester_check_rx(struct xdp_md *xdp)
+{
+ xdp_update_stats(xdp, false, false);
+
+ return XDP_PASS;
+}
+
+/* DUT */
+
+SEC("xdp")
+int xdp_do_pass(struct xdp_md *xdp)
+{
+ xdp_update_stats(xdp, true, true);
+
+ return XDP_PASS;
+}
+
+SEC("xdp")
+int xdp_do_drop(struct xdp_md *xdp)
+{
+ if (xdp_update_stats(xdp, true, true))
+ return XDP_PASS;
+
+ return XDP_DROP;
+}
+
+SEC("xdp")
+int xdp_do_aborted(struct xdp_md *xdp)
+{
+ if (xdp_process_echo_packet(xdp, true))
+ return XDP_PASS;
+
+ return XDP_ABORTED;
+}
+
+SEC("xdp")
+int xdp_do_tx(struct xdp_md *xdp)
+{
+ void *data = (void *)(long)xdp->data;
+ struct ethhdr *eh = data;
+ __u8 tmp_mac[ETH_ALEN];
+
+ if (xdp_update_stats(xdp, true, true))
+ return XDP_PASS;
+
+ __builtin_memcpy(tmp_mac, eh->h_source, ETH_ALEN);
+ __builtin_memcpy(eh->h_source, eh->h_dest, ETH_ALEN);
+ __builtin_memcpy(eh->h_dest, tmp_mac, ETH_ALEN);
+
+ return XDP_TX;
+}
+
+SEC("xdp")
+int xdp_do_redirect(struct xdp_md *xdp)
+{
+ if (xdp_process_echo_packet(xdp, true))
+ return XDP_PASS;
+
+ return bpf_redirect_map(&cpu_map, 0, 0);
+}
+
+SEC("tp_btf/xdp_exception")
+int BPF_PROG(xdp_exception, const struct net_device *dev,
+ const struct bpf_prog *xdp, __u32 act)
+{
+ __u32 *val, key = 0;
+
+ val = bpf_map_lookup_elem(&dut_stats, &key);
+ if (val)
+ __sync_add_and_fetch(val, 1);
+
+ return 0;
+}
+
+SEC("tp_btf/xdp_cpumap_kthread")
+int BPF_PROG(tp_xdp_cpumap_kthread, int map_id, unsigned int processed,
+ unsigned int drops, int sched, struct xdp_cpumap_stats *xdp_stats)
+{
+ __u32 *val, key = 0;
+
+ val = bpf_map_lookup_elem(&dut_stats, &key);
+ if (val)
+ __sync_add_and_fetch(val, 1);
+
+ return 0;
+}
+
+SEC("xdp/cpumap")
+int xdp_do_redirect_cpumap(struct xdp_md *xdp)
+{
+ void *data = (void *)(long)xdp->data;
+ struct ethhdr *eh = data;
+ __u8 tmp_mac[ETH_ALEN];
+
+ if (xdp_process_echo_packet(xdp, true))
+ return XDP_PASS;
+
+ __builtin_memcpy(tmp_mac, eh->h_source, ETH_ALEN);
+ __builtin_memcpy(eh->h_source, eh->h_dest, ETH_ALEN);
+ __builtin_memcpy(eh->h_dest, tmp_mac, ETH_ALEN);
+
+ return bpf_redirect_map(&dev_map, 0, 0);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
index 25b8178735ee..4c55b4d79d3d 100644
--- a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
+++ b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
@@ -70,10 +70,14 @@ int rx(struct xdp_md *ctx)
}
if (!bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp))
- bpf_printk("populated rx_timestamp with %u", meta->rx_timestamp);
+ bpf_printk("populated rx_timestamp with %llu", meta->rx_timestamp);
+ else
+ meta->rx_timestamp = 0; /* AF_XDP treats 0 as "not available" */
if (!bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash))
bpf_printk("populated rx_hash with %u", meta->rx_hash);
+ else
+ meta->rx_hash = 0; /* AF_XDP treats 0 as "not available" */
return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS);
}
diff --git a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
index 736686e903f6..07d786329105 100644
--- a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
@@ -310,7 +310,7 @@ static __always_inline void values_get_tcpipopts(__u16 *mss, __u8 *wscale,
static __always_inline void values_inc_synacks(void)
{
__u32 key = 1;
- __u32 *value;
+ __u64 *value;
value = bpf_map_lookup_elem(&values, &key);
if (value)
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index c5f852163246..6d5e3022c75f 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -17,6 +17,7 @@
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/un.h>
+#include <bpf/btf.h>
static bool verbose(void)
{
@@ -967,6 +968,43 @@ int write_sysctl(const char *sysctl, const char *value)
return 0;
}
+int get_bpf_max_tramp_links_from(struct btf *btf)
+{
+ const struct btf_enum *e;
+ const struct btf_type *t;
+ __u32 i, type_cnt;
+ const char *name;
+ __u16 j, vlen;
+
+ for (i = 1, type_cnt = btf__type_cnt(btf); i < type_cnt; i++) {
+ t = btf__type_by_id(btf, i);
+ if (!t || !btf_is_enum(t) || t->name_off)
+ continue;
+ e = btf_enum(t);
+ for (j = 0, vlen = btf_vlen(t); j < vlen; j++, e++) {
+ name = btf__str_by_offset(btf, e->name_off);
+ if (name && !strcmp(name, "BPF_MAX_TRAMP_LINKS"))
+ return e->val;
+ }
+ }
+
+ return -1;
+}
+
+int get_bpf_max_tramp_links(void)
+{
+ struct btf *vmlinux_btf;
+ int ret;
+
+ vmlinux_btf = btf__load_vmlinux_btf();
+ if (!ASSERT_OK_PTR(vmlinux_btf, "vmlinux btf"))
+ return -1;
+ ret = get_bpf_max_tramp_links_from(vmlinux_btf);
+ btf__free(vmlinux_btf);
+
+ return ret;
+}
+
#define MAX_BACKTRACE_SZ 128
void crash_handler(int signum)
{
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index 3f058dfadbaf..d5d51ec97ec8 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -394,6 +394,8 @@ int kern_sync_rcu(void);
int trigger_module_test_read(int read_sz);
int trigger_module_test_write(int write_sz);
int write_sysctl(const char *sysctl, const char *value);
+int get_bpf_max_tramp_links_from(struct btf *btf);
+int get_bpf_max_tramp_links(void);
#ifdef __x86_64__
#define SYS_NANOSLEEP_KPROBE_NAME "__x64_sys_nanosleep"
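
get_bpf_max_tramp_links() resolves BPF_MAX_TRAMP_LINKS from vmlinux BTF at run time, so the trampoline stress tests above follow whatever limit the running kernel was built with; kernel-side the constant lives in an unnamed enum, which BTF encodes as an anonymous enum whose enumerators remain searchable by name. Usage from a test, sketched:

    /* size per-link arrays from the live kernel's limit instead of a
     * hardcoded kernel-internal constant */
    int n = get_bpf_max_tramp_links();
    int *fds;

    if (n < 1)
            return;             /* BTF lookup failed */
    fds = calloc(n, sizeof(*fds));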
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 8c808551dfd7..887c49dc5abd 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -209,7 +209,7 @@ loop:
insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_vlan_push),
+ BPF_FUNC_skb_vlan_push);
insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
i++;
}
@@ -220,7 +220,7 @@ loop:
i++;
insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_vlan_pop),
+ BPF_FUNC_skb_vlan_pop);
insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
i++;
}
diff --git a/tools/testing/selftests/bpf/test_xdp_features.sh b/tools/testing/selftests/bpf/test_xdp_features.sh
new file mode 100755
index 000000000000..0aa71c4455c0
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_xdp_features.sh
@@ -0,0 +1,107 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+readonly NS="ns1-$(mktemp -u XXXXXX)"
+readonly V0_IP4=10.10.0.11
+readonly V1_IP4=10.10.0.1
+readonly V0_IP6=2001:db8::11
+readonly V1_IP6=2001:db8::1
+
+ret=1
+
+setup() {
+ {
+ ip netns add ${NS}
+
+ ip link add v1 type veth peer name v0 netns ${NS}
+
+ ip link set v1 up
+ ip addr add $V1_IP4/24 dev v1
+ ip addr add $V1_IP6/64 nodad dev v1
+ ip -n ${NS} link set dev v0 up
+ ip -n ${NS} addr add $V0_IP4/24 dev v0
+ ip -n ${NS} addr add $V0_IP6/64 nodad dev v0
+
+ # Enable XDP mode and disable checksum offload
+ ethtool -K v1 gro on
+ ethtool -K v1 tx-checksumming off
+ ip netns exec ${NS} ethtool -K v0 gro on
+ ip netns exec ${NS} ethtool -K v0 tx-checksumming off
+ } > /dev/null 2>&1
+}
+
+cleanup() {
+ ip link del v1 2> /dev/null
+ ip netns del ${NS} 2> /dev/null
+ [ "$(pidof xdp_features)" = "" ] || kill $(pidof xdp_features) 2> /dev/null
+}
+
+wait_for_dut_server() {
+ while sleep 1; do
+ ss -tlp | grep -q xdp_features
+ [ $? -eq 0 ] && break
+ done
+}
+
+test_xdp_features() {
+ setup
+
+ ## XDP_PASS
+ ./xdp_features -f XDP_PASS -D $V1_IP6 -T $V0_IP6 v1 &
+ wait_for_dut_server
+ ip netns exec ${NS} ./xdp_features -t -f XDP_PASS \
+ -D $V1_IP6 -C $V1_IP6 \
+ -T $V0_IP6 v0
+ [ $? -ne 0 ] && exit
+
+ ## XDP_DROP
+ ./xdp_features -f XDP_DROP -D ::ffff:$V1_IP4 -T ::ffff:$V0_IP4 v1 &
+ wait_for_dut_server
+ ip netns exec ${NS} ./xdp_features -t -f XDP_DROP \
+ -D ::ffff:$V1_IP4 \
+ -C ::ffff:$V1_IP4 \
+ -T ::ffff:$V0_IP4 v0
+ [ $? -ne 0 ] && exit
+
+ ## XDP_ABORTED
+ ./xdp_features -f XDP_ABORTED -D $V1_IP6 -T $V0_IP6 v1 &
+ wait_for_dut_server
+ ip netns exec ${NS} ./xdp_features -t -f XDP_ABORTED \
+ -D $V1_IP6 -C $V1_IP6 \
+ -T $V0_IP6 v0
+ [ $? -ne 0 ] && exit
+
+ ## XDP_TX
+ ./xdp_features -f XDP_TX -D ::ffff:$V1_IP4 -T ::ffff:$V0_IP4 v1 &
+ wait_for_dut_server
+ ip netns exec ${NS} ./xdp_features -t -f XDP_TX \
+ -D ::ffff:$V1_IP4 \
+ -C ::ffff:$V1_IP4 \
+ -T ::ffff:$V0_IP4 v0
+ [ $? -ne 0 ] && exit
+
+ ## XDP_REDIRECT
+ ./xdp_features -f XDP_REDIRECT -D $V1_IP6 -T $V0_IP6 v1 &
+ wait_for_dut_server
+ ip netns exec ${NS} ./xdp_features -t -f XDP_REDIRECT \
+ -D $V1_IP6 -C $V1_IP6 \
+ -T $V0_IP6 v0
+ [ $? -ne 0 ] && exit
+
+ ## XDP_NDO_XMIT
+ ./xdp_features -f XDP_NDO_XMIT -D ::ffff:$V1_IP4 -T ::ffff:$V0_IP4 v1 &
+ wait_for_dut_server
+ ip netns exec ${NS} ./xdp_features -t -f XDP_NDO_XMIT \
+ -D ::ffff:$V1_IP4 \
+ -C ::ffff:$V1_IP4 \
+ -T ::ffff:$V0_IP4 v0
+ ret=$?
+ cleanup
+}
+
+set -e
+trap cleanup 2 3 6 9
+
+test_xdp_features
+
+exit $ret
diff --git a/tools/testing/selftests/bpf/vmtest.sh b/tools/testing/selftests/bpf/vmtest.sh
index 316a56d680f2..685034528018 100755
--- a/tools/testing/selftests/bpf/vmtest.sh
+++ b/tools/testing/selftests/bpf/vmtest.sh
@@ -13,7 +13,7 @@ s390x)
QEMU_BINARY=qemu-system-s390x
QEMU_CONSOLE="ttyS1"
QEMU_FLAGS=(-smp 2)
- BZIMAGE="arch/s390/boot/compressed/vmlinux"
+ BZIMAGE="arch/s390/boot/vmlinux"
;;
x86_64)
QEMU_BINARY=qemu-system-x86_64
diff --git a/tools/testing/selftests/bpf/xdp_features.c b/tools/testing/selftests/bpf/xdp_features.c
new file mode 100644
index 000000000000..fce12165213b
--- /dev/null
+++ b/tools/testing/selftests/bpf/xdp_features.c
@@ -0,0 +1,699 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/netdev.h>
+#include <linux/if_link.h>
+#include <signal.h>
+#include <argp.h>
+#include <net/if.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include <pthread.h>
+
+#include <network_helpers.h>
+
+#include "xdp_features.skel.h"
+#include "xdp_features.h"
+
+#define RED(str) "\033[0;31m" str "\033[0m"
+#define GREEN(str) "\033[0;32m" str "\033[0m"
+#define YELLOW(str) "\033[0;33m" str "\033[0m"
+
+static struct env {
+ bool verbosity;
+ int ifindex;
+ bool is_tester;
+ struct {
+ enum netdev_xdp_act drv_feature;
+ enum xdp_action action;
+ } feature;
+ struct sockaddr_storage dut_ctrl_addr;
+ struct sockaddr_storage dut_addr;
+ struct sockaddr_storage tester_addr;
+} env;
+
+#define BUFSIZE 128
+
+void test__fail(void) { /* for network_helpers.c */ }
+
+static int libbpf_print_fn(enum libbpf_print_level level,
+ const char *format, va_list args)
+{
+ if (level == LIBBPF_DEBUG && !env.verbosity)
+ return 0;
+ return vfprintf(stderr, format, args);
+}
+
+static volatile bool exiting;
+
+static void sig_handler(int sig)
+{
+ exiting = true;
+}
+
+const char *argp_program_version = "xdp-features 0.0";
+const char argp_program_doc[] =
+"XDP features detection application.\n"
+"\n"
+"XDP features application checks the XDP advertised features match detected ones.\n"
+"\n"
+"USAGE: ./xdp-features [-vt] [-f <xdp-feature>] [-D <dut-data-ip>] [-T <tester-data-ip>] [-C <dut-ctrl-ip>] <iface-name>\n"
+"\n"
+"dut-data-ip, tester-data-ip, dut-ctrl-ip: IPv6 or IPv4-mapped-IPv6 addresses;\n"
+"\n"
+"XDP features\n:"
+"- XDP_PASS\n"
+"- XDP_DROP\n"
+"- XDP_ABORTED\n"
+"- XDP_REDIRECT\n"
+"- XDP_NDO_XMIT\n"
+"- XDP_TX\n";
+
+static const struct argp_option opts[] = {
+ { "verbose", 'v', NULL, 0, "Verbose debug output" },
+ { "tester", 't', NULL, 0, "Tester mode" },
+ { "feature", 'f', "XDP-FEATURE", 0, "XDP feature to test" },
+ { "dut_data_ip", 'D', "DUT-DATA-IP", 0, "DUT IP data channel" },
+ { "dut_ctrl_ip", 'C', "DUT-CTRL-IP", 0, "DUT IP control channel" },
+ { "tester_data_ip", 'T', "TESTER-DATA-IP", 0, "Tester IP data channel" },
+ {},
+};
+
+static int get_xdp_feature(const char *arg)
+{
+ if (!strcmp(arg, "XDP_PASS")) {
+ env.feature.action = XDP_PASS;
+ env.feature.drv_feature = NETDEV_XDP_ACT_BASIC;
+ } else if (!strcmp(arg, "XDP_DROP")) {
+ env.feature.drv_feature = NETDEV_XDP_ACT_BASIC;
+ env.feature.action = XDP_DROP;
+ } else if (!strcmp(arg, "XDP_ABORTED")) {
+ env.feature.drv_feature = NETDEV_XDP_ACT_BASIC;
+ env.feature.action = XDP_ABORTED;
+ } else if (!strcmp(arg, "XDP_TX")) {
+ env.feature.drv_feature = NETDEV_XDP_ACT_BASIC;
+ env.feature.action = XDP_TX;
+ } else if (!strcmp(arg, "XDP_REDIRECT")) {
+ env.feature.drv_feature = NETDEV_XDP_ACT_REDIRECT;
+ env.feature.action = XDP_REDIRECT;
+ } else if (!strcmp(arg, "XDP_NDO_XMIT")) {
+ env.feature.drv_feature = NETDEV_XDP_ACT_NDO_XMIT;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static char *get_xdp_feature_str(void)
+{
+ switch (env.feature.action) {
+ case XDP_PASS:
+ return YELLOW("XDP_PASS");
+ case XDP_DROP:
+ return YELLOW("XDP_DROP");
+ case XDP_ABORTED:
+ return YELLOW("XDP_ABORTED");
+ case XDP_TX:
+ return YELLOW("XDP_TX");
+ case XDP_REDIRECT:
+ return YELLOW("XDP_REDIRECT");
+ default:
+ break;
+ }
+
+ if (env.feature.drv_feature == NETDEV_XDP_ACT_NDO_XMIT)
+ return YELLOW("XDP_NDO_XMIT");
+
+ return "";
+}
+
+static error_t parse_arg(int key, char *arg, struct argp_state *state)
+{
+ switch (key) {
+ case 'v':
+ env.verbosity = true;
+ break;
+ case 't':
+ env.is_tester = true;
+ break;
+ case 'f':
+ if (get_xdp_feature(arg) < 0) {
+ fprintf(stderr, "Invalid xdp feature: %s\n", arg);
+ argp_usage(state);
+ return ARGP_ERR_UNKNOWN;
+ }
+ break;
+ case 'D':
+ if (make_sockaddr(AF_INET6, arg, DUT_ECHO_PORT,
+ &env.dut_addr, NULL)) {
+ fprintf(stderr, "Invalid DUT address: %s\n", arg);
+ return ARGP_ERR_UNKNOWN;
+ }
+ break;
+ case 'C':
+ if (make_sockaddr(AF_INET6, arg, DUT_CTRL_PORT,
+ &env.dut_ctrl_addr, NULL)) {
+ fprintf(stderr, "Invalid DUT CTRL address: %s\n", arg);
+ return ARGP_ERR_UNKNOWN;
+ }
+ break;
+ case 'T':
+ if (make_sockaddr(AF_INET6, arg, 0, &env.tester_addr, NULL)) {
+ fprintf(stderr, "Invalid Tester address: %s\n", arg);
+ return ARGP_ERR_UNKNOWN;
+ }
+ break;
+ case ARGP_KEY_ARG:
+ errno = 0;
+ if (strlen(arg) >= IF_NAMESIZE) {
+ fprintf(stderr, "Invalid device name: %s\n", arg);
+ argp_usage(state);
+ return ARGP_ERR_UNKNOWN;
+ }
+
+ env.ifindex = if_nametoindex(arg);
+ if (!env.ifindex)
+ env.ifindex = strtoul(arg, NULL, 0);
+ if (!env.ifindex) {
+ fprintf(stderr,
+ "Bad interface index or name (%d): %s\n",
+ errno, strerror(errno));
+ argp_usage(state);
+ return ARGP_ERR_UNKNOWN;
+ }
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static const struct argp argp = {
+ .options = opts,
+ .parser = parse_arg,
+ .doc = argp_program_doc,
+};
+
+static void set_env_default(void)
+{
+ env.feature.drv_feature = NETDEV_XDP_ACT_NDO_XMIT;
+ env.feature.action = -EINVAL;
+ env.ifindex = -ENODEV;
+ make_sockaddr(AF_INET6, "::ffff:127.0.0.1", DUT_CTRL_PORT,
+ &env.dut_ctrl_addr, NULL);
+ make_sockaddr(AF_INET6, "::ffff:127.0.0.1", DUT_ECHO_PORT,
+ &env.dut_addr, NULL);
+ make_sockaddr(AF_INET6, "::ffff:127.0.0.1", 0, &env.tester_addr, NULL);
+}
+
+static void *dut_echo_thread(void *arg)
+{
+ unsigned char buf[sizeof(struct tlv_hdr)];
+ int sockfd = *(int *)arg;
+
+ while (!exiting) {
+ struct tlv_hdr *tlv = (struct tlv_hdr *)buf;
+ struct sockaddr_storage addr;
+ socklen_t addrlen = sizeof(addr); /* recvfrom() requires this initialized */
+ size_t n;
+
+ n = recvfrom(sockfd, buf, sizeof(buf), MSG_WAITALL,
+ (struct sockaddr *)&addr, &addrlen);
+ if (n != ntohs(tlv->len))
+ continue;
+
+ if (ntohs(tlv->type) != CMD_ECHO)
+ continue;
+
+ sendto(sockfd, buf, sizeof(buf), MSG_NOSIGNAL | MSG_CONFIRM,
+ (struct sockaddr *)&addr, addrlen);
+ }
+
+ close(sockfd);
+ pthread_exit((void *)0);
+
+ return NULL;
+}
+
+static int dut_run_echo_thread(pthread_t *t, int *sockfd)
+{
+ int err;
+
+ sockfd = start_reuseport_server(AF_INET6, SOCK_DGRAM, NULL,
+ DUT_ECHO_PORT, 0, 1);
+ if (!sockfd) {
+ fprintf(stderr, "Failed to create echo socket\n");
+ return -errno;
+ }
+
+ /* start echo channel */
+ err = pthread_create(t, NULL, dut_echo_thread, sockfd);
+ if (err) {
+ fprintf(stderr, "Failed creating dut_echo thread: %s\n",
+ strerror(err));
+ free_fds(sockfd, 1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dut_attach_xdp_prog(struct xdp_features *skel, int flags)
+{
+ enum xdp_action action = env.feature.action;
+ struct bpf_program *prog;
+ unsigned int key = 0;
+ int err, fd = 0;
+
+ if (env.feature.drv_feature == NETDEV_XDP_ACT_NDO_XMIT) {
+ struct bpf_devmap_val entry = {
+ .ifindex = env.ifindex,
+ };
+
+ err = bpf_map__update_elem(skel->maps.dev_map,
+ &key, sizeof(key),
+ &entry, sizeof(entry), 0);
+ if (err < 0)
+ return err;
+
+ fd = bpf_program__fd(skel->progs.xdp_do_redirect_cpumap);
+ action = XDP_REDIRECT;
+ }
+
+ switch (action) {
+ case XDP_TX:
+ prog = skel->progs.xdp_do_tx;
+ break;
+ case XDP_DROP:
+ prog = skel->progs.xdp_do_drop;
+ break;
+ case XDP_ABORTED:
+ prog = skel->progs.xdp_do_aborted;
+ break;
+ case XDP_PASS:
+ prog = skel->progs.xdp_do_pass;
+ break;
+ case XDP_REDIRECT: {
+ struct bpf_cpumap_val entry = {
+ .qsize = 2048,
+ .bpf_prog.fd = fd,
+ };
+
+ err = bpf_map__update_elem(skel->maps.cpu_map,
+ &key, sizeof(key),
+ &entry, sizeof(entry), 0);
+ if (err < 0)
+ return err;
+
+ prog = skel->progs.xdp_do_redirect;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ err = bpf_xdp_attach(env.ifindex, bpf_program__fd(prog), flags, NULL);
+ if (err)
+ fprintf(stderr,
+ "Failed to attach XDP program to ifindex %d\n",
+ env.ifindex);
+ return err;
+}
+
+static int recv_msg(int sockfd, void *buf, size_t bufsize, void *val,
+ size_t val_size)
+{
+ struct tlv_hdr *tlv = (struct tlv_hdr *)buf;
+ size_t len;
+
+ len = recv(sockfd, buf, bufsize, 0);
+ if (len < sizeof(*tlv) || len != ntohs(tlv->len))
+ return -EINVAL;
+
+ if (val) {
+ len -= sizeof(*tlv);
+ if (len > val_size)
+ return -ENOMEM;
+
+ memcpy(val, tlv->data, len);
+ }
+
+ return 0;
+}
+
+static int dut_run(struct xdp_features *skel)
+{
+ int flags = XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_DRV_MODE;
+ int state = -1, err, *sockfd, ctrl_sockfd, echo_sockfd;
+ struct sockaddr_storage ctrl_addr;
+ pthread_t dut_thread;
+ socklen_t addrlen = sizeof(ctrl_addr); /* accept() requires this initialized */
+
+ sockfd = start_reuseport_server(AF_INET6, SOCK_STREAM, NULL,
+ DUT_CTRL_PORT, 0, 1);
+ if (!sockfd) {
+ fprintf(stderr, "Failed to create DUT socket\n");
+ return -errno;
+ }
+
+ ctrl_sockfd = accept(*sockfd, (struct sockaddr *)&ctrl_addr, &addrlen);
+ if (ctrl_sockfd < 0) {
+ fprintf(stderr, "Failed to accept connection on DUT socket\n");
+ free_fds(sockfd, 1);
+ return -errno;
+ }
+
+ /* CTRL loop */
+ while (!exiting) {
+ unsigned char buf[BUFSIZE] = {};
+ struct tlv_hdr *tlv = (struct tlv_hdr *)buf;
+
+ err = recv_msg(ctrl_sockfd, buf, BUFSIZE, NULL, 0);
+ if (err)
+ continue;
+
+ switch (ntohs(tlv->type)) {
+ case CMD_START: {
+ if (state == CMD_START)
+ continue;
+
+ state = CMD_START;
+ /* Load the XDP program on the DUT */
+ err = dut_attach_xdp_prog(skel, flags);
+ if (err)
+ goto out;
+
+ err = dut_run_echo_thread(&dut_thread, &echo_sockfd);
+ if (err < 0)
+ goto out;
+
+ tlv->type = htons(CMD_ACK);
+ tlv->len = htons(sizeof(*tlv));
+ err = send(ctrl_sockfd, buf, sizeof(*tlv), 0);
+ if (err < 0)
+ goto end_thread;
+ break;
+ }
+ case CMD_STOP:
+ if (state != CMD_START)
+ break;
+
+ state = CMD_STOP;
+
+ exiting = true;
+ bpf_xdp_detach(env.ifindex, flags, NULL);
+
+ tlv->type = htons(CMD_ACK);
+ tlv->len = htons(sizeof(*tlv));
+ err = send(ctrl_sockfd, buf, sizeof(*tlv), 0);
+ goto end_thread;
+ case CMD_GET_XDP_CAP: {
+ LIBBPF_OPTS(bpf_xdp_query_opts, opts);
+ unsigned long long val;
+ size_t n;
+
+ err = bpf_xdp_query(env.ifindex, XDP_FLAGS_DRV_MODE,
+ &opts);
+ if (err) {
+ fprintf(stderr,
+ "Failed to query XDP cap for ifindex %d\n",
+ env.ifindex);
+ goto end_thread;
+ }
+
+ tlv->type = htons(CMD_ACK);
+ n = sizeof(*tlv) + sizeof(opts.feature_flags);
+ tlv->len = htons(n);
+
+ val = htobe64(opts.feature_flags);
+ memcpy(tlv->data, &val, sizeof(val));
+
+ err = send(ctrl_sockfd, buf, n, 0);
+ if (err < 0)
+ goto end_thread;
+ break;
+ }
+ case CMD_GET_STATS: {
+ unsigned int key = 0, val;
+ size_t n;
+
+ err = bpf_map__lookup_elem(skel->maps.dut_stats,
+ &key, sizeof(key),
+ &val, sizeof(val), 0);
+ if (err) {
+ fprintf(stderr, "bpf_map_lookup_elem failed\n");
+ goto end_thread;
+ }
+
+ tlv->type = htons(CMD_ACK);
+ n = sizeof(*tlv) + sizeof(val);
+ tlv->len = htons(n);
+
+ val = htonl(val);
+ memcpy(tlv->data, &val, sizeof(val));
+
+ err = send(ctrl_sockfd, buf, n, 0);
+ if (err < 0)
+ goto end_thread;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+end_thread:
+ pthread_join(dut_thread, NULL);
+out:
+ bpf_xdp_detach(env.ifindex, flags, NULL);
+ close(ctrl_sockfd);
+ free_fds(sockfd, 1);
+
+ return err;
+}
+
+static bool tester_collect_detected_cap(struct xdp_features *skel,
+ unsigned int dut_stats)
+{
+ unsigned int err, key = 0, val;
+
+ if (!dut_stats)
+ return false;
+
+ err = bpf_map__lookup_elem(skel->maps.stats, &key, sizeof(key),
+ &val, sizeof(val), 0);
+ if (err) {
+ fprintf(stderr, "bpf_map_lookup_elem failed\n");
+ return false;
+ }
+
+ switch (env.feature.action) {
+ case XDP_PASS:
+ case XDP_TX:
+ case XDP_REDIRECT:
+ return val > 0;
+ case XDP_DROP:
+ case XDP_ABORTED:
+ return val == 0;
+ default:
+ break;
+ }
+
+ if (env.feature.drv_feature == NETDEV_XDP_ACT_NDO_XMIT)
+ return val > 0;
+
+ return false;
+}
+
+static int send_and_recv_msg(int sockfd, enum test_commands cmd, void *val,
+ size_t val_size)
+{
+ unsigned char buf[BUFSIZE] = {};
+ struct tlv_hdr *tlv = (struct tlv_hdr *)buf;
+ int err;
+
+ tlv->type = htons(cmd);
+ tlv->len = htons(sizeof(*tlv));
+
+ err = send(sockfd, buf, sizeof(*tlv), 0);
+ if (err < 0)
+ return err;
+
+ err = recv_msg(sockfd, buf, BUFSIZE, val, val_size);
+ if (err < 0)
+ return err;
+
+ return ntohs(tlv->type) == CMD_ACK ? 0 : -EINVAL;
+}
+
+static int send_echo_msg(void)
+{
+ unsigned char buf[sizeof(struct tlv_hdr)];
+ struct tlv_hdr *tlv = (struct tlv_hdr *)buf;
+ int sockfd, n;
+
+ sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
+ if (sockfd < 0) {
+ fprintf(stderr, "Failed to create echo socket\n");
+ return -errno;
+ }
+
+ tlv->type = htons(CMD_ECHO);
+ tlv->len = htons(sizeof(*tlv));
+
+ n = sendto(sockfd, buf, sizeof(*tlv), MSG_NOSIGNAL | MSG_CONFIRM,
+ (struct sockaddr *)&env.dut_addr, sizeof(env.dut_addr));
+ close(sockfd);
+
+ return n == ntohs(tlv->len) ? 0 : -EINVAL;
+}
+
+static int tester_run(struct xdp_features *skel)
+{
+ int flags = XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_DRV_MODE;
+ unsigned long long advertised_feature;
+ struct bpf_program *prog;
+ unsigned int stats;
+ int i, err, sockfd;
+ bool detected_cap;
+
+ sockfd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (sockfd < 0) {
+ fprintf(stderr, "Failed to create tester socket\n");
+ return -errno;
+ }
+
+	if (settimeo(sockfd, 1000) < 0) {
+		close(sockfd);
+		return -EINVAL;
+	}
+
+ err = connect(sockfd, (struct sockaddr *)&env.dut_ctrl_addr,
+ sizeof(env.dut_ctrl_addr));
+	if (err) {
+		fprintf(stderr, "Failed to connect to the DUT\n");
+		close(sockfd);
+		return -errno;
+	}
+
+ err = send_and_recv_msg(sockfd, CMD_GET_XDP_CAP, &advertised_feature,
+ sizeof(advertised_feature));
+ if (err < 0) {
+ close(sockfd);
+ return err;
+ }
+
+ advertised_feature = be64toh(advertised_feature);
+
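+	/* TX/ndo_xmit features are verified by counting the packets the DUT
+	 * sends back; everything else is checked on the receive path.
+	 */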
+ if (env.feature.drv_feature == NETDEV_XDP_ACT_NDO_XMIT ||
+ env.feature.action == XDP_TX)
+ prog = skel->progs.xdp_tester_check_tx;
+ else
+ prog = skel->progs.xdp_tester_check_rx;
+
+ err = bpf_xdp_attach(env.ifindex, bpf_program__fd(prog), flags, NULL);
+ if (err) {
+ fprintf(stderr, "Failed to attach XDP program to ifindex %d\n",
+ env.ifindex);
+ goto out;
+ }
+
+ err = send_and_recv_msg(sockfd, CMD_START, NULL, 0);
+ if (err)
+ goto out;
+
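+	/* generate roughly ten seconds of echo traffic for the DUT */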
+ for (i = 0; i < 10 && !exiting; i++) {
+ err = send_echo_msg();
+ if (err < 0)
+ goto out;
+
+ sleep(1);
+ }
+
+ err = send_and_recv_msg(sockfd, CMD_GET_STATS, &stats, sizeof(stats));
+ if (err)
+ goto out;
+
+ /* stop the test */
+ err = send_and_recv_msg(sockfd, CMD_STOP, NULL, 0);
+	/* send one more echo message to wake the DUT's echo thread */
+ send_echo_msg();
+
+ detected_cap = tester_collect_detected_cap(skel, ntohl(stats));
+
+ fprintf(stdout, "Feature %s: [%s][%s]\n", get_xdp_feature_str(),
+ detected_cap ? GREEN("DETECTED") : RED("NOT DETECTED"),
+ env.feature.drv_feature & advertised_feature ? GREEN("ADVERTISED")
+ : RED("NOT ADVERTISED"));
+out:
+ bpf_xdp_detach(env.ifindex, flags, NULL);
+ close(sockfd);
+ return err < 0 ? err : 0;
+}
+
+int main(int argc, char **argv)
+{
+ struct xdp_features *skel;
+ int err;
+
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+ libbpf_set_print(libbpf_print_fn);
+
+ signal(SIGINT, sig_handler);
+ signal(SIGTERM, sig_handler);
+
+ set_env_default();
+
+ /* Parse command line arguments */
+ err = argp_parse(&argp, argc, argv, 0, NULL, NULL);
+ if (err)
+ return err;
+
+ if (env.ifindex < 0) {
+ fprintf(stderr, "Invalid ifindex\n");
+ return -ENODEV;
+ }
+
+ /* Load and verify BPF application */
+ skel = xdp_features__open();
+ if (!skel) {
+ fprintf(stderr, "Failed to open and load BPF skeleton\n");
+ return -EINVAL;
+ }
+
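+	/* the addresses live in the skeleton's read-only data section and
+	 * must be set before xdp_features__load() freezes it
+	 */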
+ skel->rodata->tester_addr =
+ ((struct sockaddr_in6 *)&env.tester_addr)->sin6_addr;
+ skel->rodata->dut_addr =
+ ((struct sockaddr_in6 *)&env.dut_addr)->sin6_addr;
+
+ /* Load & verify BPF programs */
+ err = xdp_features__load(skel);
+ if (err) {
+ fprintf(stderr, "Failed to load and verify BPF skeleton\n");
+ goto cleanup;
+ }
+
+ err = xdp_features__attach(skel);
+ if (err) {
+ fprintf(stderr, "Failed to attach BPF skeleton\n");
+ goto cleanup;
+ }
+
+ if (env.is_tester) {
+ /* Tester */
+ fprintf(stdout, "Starting tester on device %d\n", env.ifindex);
+ err = tester_run(skel);
+ } else {
+ /* DUT */
+ fprintf(stdout, "Starting DUT on device %d\n", env.ifindex);
+ err = dut_run(skel);
+ }
+
+cleanup:
+ xdp_features__destroy(skel);
+
+ return err < 0 ? -err : 0;
+}
diff --git a/tools/testing/selftests/bpf/xdp_features.h b/tools/testing/selftests/bpf/xdp_features.h
new file mode 100644
index 000000000000..2670c541713b
--- /dev/null
+++ b/tools/testing/selftests/bpf/xdp_features.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* test commands */
+enum test_commands {
+ CMD_STOP, /* CMD */
+ CMD_START, /* CMD */
+ CMD_ECHO, /* CMD */
+ CMD_ACK, /* CMD + data */
+ CMD_GET_XDP_CAP, /* CMD */
+ CMD_GET_STATS, /* CMD */
+};
+
+#define DUT_CTRL_PORT 12345
+#define DUT_ECHO_PORT 12346
+
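+/* control-channel framing: type and length in network byte order,
+ * followed by an optional payload
+ */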
+struct tlv_hdr {
+ __be16 type;
+ __be16 len;
+ __u8 data[];
+};
diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c
index 3823b1c499cc..1c8acb68b977 100644
--- a/tools/testing/selftests/bpf/xdp_hw_metadata.c
+++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c
@@ -24,7 +24,6 @@
#include <linux/net_tstamp.h>
#include <linux/udp.h>
#include <linux/sockios.h>
-#include <linux/net_tstamp.h>
#include <sys/mman.h>
#include <net/if.h>
#include <poll.h>
@@ -121,7 +120,7 @@ static void close_xsk(struct xsk *xsk)
xsk_umem__delete(xsk->umem);
if (xsk->socket)
xsk_socket__delete(xsk->socket);
- munmap(xsk->umem, UMEM_SIZE);
+ munmap(xsk->umem_area, UMEM_SIZE);
}
static void refill_rx(struct xsk *xsk, __u64 addr)
@@ -165,7 +164,7 @@ static void verify_skb_metadata(int fd)
hdr.msg_controllen = sizeof(cmsg_buf);
if (recvmsg(fd, &hdr, 0) < 0)
- error(-1, errno, "recvmsg");
+ error(1, errno, "recvmsg");
for (cmsg = CMSG_FIRSTHDR(&hdr); cmsg != NULL;
cmsg = CMSG_NXTHDR(&hdr, cmsg)) {
@@ -270,16 +269,16 @@ static int rxq_num(const char *ifname)
struct ifreq ifr = {
.ifr_data = (void *)&ch,
};
- strcpy(ifr.ifr_name, ifname);
+ strncpy(ifr.ifr_name, ifname, IF_NAMESIZE - 1);
int fd, ret;
fd = socket(AF_UNIX, SOCK_DGRAM, 0);
if (fd < 0)
- error(-1, errno, "socket");
+ error(1, errno, "socket");
ret = ioctl(fd, SIOCETHTOOL, &ifr);
if (ret < 0)
- error(-1, errno, "ioctl(SIOCETHTOOL)");
+ error(1, errno, "ioctl(SIOCETHTOOL)");
close(fd);
@@ -291,16 +290,16 @@ static void hwtstamp_ioctl(int op, const char *ifname, struct hwtstamp_config *c
struct ifreq ifr = {
.ifr_data = (void *)cfg,
};
- strcpy(ifr.ifr_name, ifname);
+ strncpy(ifr.ifr_name, ifname, IF_NAMESIZE - 1);
int fd, ret;
fd = socket(AF_UNIX, SOCK_DGRAM, 0);
if (fd < 0)
- error(-1, errno, "socket");
+ error(1, errno, "socket");
ret = ioctl(fd, op, &ifr);
if (ret < 0)
- error(-1, errno, "ioctl(%d)", op);
+ error(1, errno, "ioctl(%d)", op);
close(fd);
}
@@ -360,7 +359,7 @@ static void timestamping_enable(int fd, int val)
ret = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
if (ret < 0)
- error(-1, errno, "setsockopt(SO_TIMESTAMPING)");
+ error(1, errno, "setsockopt(SO_TIMESTAMPING)");
}
int main(int argc, char *argv[])
@@ -386,13 +385,13 @@ int main(int argc, char *argv[])
rx_xsk = malloc(sizeof(struct xsk) * rxq);
if (!rx_xsk)
- error(-1, ENOMEM, "malloc");
+ error(1, ENOMEM, "malloc");
for (i = 0; i < rxq; i++) {
printf("open_xsk(%s, %p, %d)\n", ifname, &rx_xsk[i], i);
ret = open_xsk(ifindex, &rx_xsk[i], i);
if (ret)
- error(-1, -ret, "open_xsk");
+ error(1, -ret, "open_xsk");
printf("xsk_socket__fd() -> %d\n", xsk_socket__fd(rx_xsk[i].socket));
}
@@ -400,7 +399,7 @@ int main(int argc, char *argv[])
printf("open bpf program...\n");
bpf_obj = xdp_hw_metadata__open();
if (libbpf_get_error(bpf_obj))
- error(-1, libbpf_get_error(bpf_obj), "xdp_hw_metadata__open");
+ error(1, libbpf_get_error(bpf_obj), "xdp_hw_metadata__open");
prog = bpf_object__find_program_by_name(bpf_obj->obj, "rx");
bpf_program__set_ifindex(prog, ifindex);
@@ -409,12 +408,12 @@ int main(int argc, char *argv[])
printf("load bpf program...\n");
ret = xdp_hw_metadata__load(bpf_obj);
if (ret)
- error(-1, -ret, "xdp_hw_metadata__load");
+ error(1, -ret, "xdp_hw_metadata__load");
printf("prepare skb endpoint...\n");
server_fd = start_server(AF_INET6, SOCK_DGRAM, NULL, 9092, 1000);
if (server_fd < 0)
- error(-1, errno, "start_server");
+ error(1, errno, "start_server");
timestamping_enable(server_fd,
SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE);
@@ -427,7 +426,7 @@ int main(int argc, char *argv[])
printf("map[%d] = %d\n", queue_id, sock_fd);
ret = bpf_map_update_elem(bpf_map__fd(bpf_obj->maps.xsk), &queue_id, &sock_fd, 0);
if (ret)
- error(-1, -ret, "bpf_map_update_elem");
+ error(1, -ret, "bpf_map_update_elem");
}
printf("attach bpf program...\n");
@@ -435,12 +434,12 @@ int main(int argc, char *argv[])
bpf_program__fd(bpf_obj->progs.rx),
XDP_FLAGS, NULL);
if (ret)
- error(-1, -ret, "bpf_xdp_attach");
+ error(1, -ret, "bpf_xdp_attach");
signal(SIGINT, handle_signal);
ret = verify_metadata(rx_xsk, rxq, server_fd);
close(server_fd);
cleanup();
if (ret)
- error(-1, -ret, "verify_metadata");
+ error(1, -ret, "verify_metadata");
}
diff --git a/tools/testing/selftests/bpf/xdp_synproxy.c b/tools/testing/selftests/bpf/xdp_synproxy.c
index 410a1385a01d..6dbe0b745198 100644
--- a/tools/testing/selftests/bpf/xdp_synproxy.c
+++ b/tools/testing/selftests/bpf/xdp_synproxy.c
@@ -116,6 +116,7 @@ static void parse_options(int argc, char *argv[], unsigned int *ifindex, __u32 *
*tcpipopts = 0;
*ports = NULL;
*single = false;
+ *tc = false;
while (true) {
int opt;